Code example #1
    def __init__(self, plugins=None):
        """User preference for each service.

        :param plugins: List of entry point namespaces to load.

        Create a new :class:`~openstack.profile.Profile`
        object with no preferences defined, but knowledge of the services.
        Services are identified by their service type, e.g.: 'identity',
        'compute', etc.
        """
        self._services = {}

        self._add_service(anti_ddos_service.AntiDDosService(version="v1"))
        self._add_service(block_store_service.BlockStoreService(version="v2"))
        self._add_service(compute_service.ComputeService(version="v2"))
        self._add_service(cts_service.CTSService(version="v1"))
        self._add_service(dms_service.DMSService(version="v1"))
        self._add_service(identity_service.IdentityService(version="v3"))
        self._add_service(image_service.ImageService(version="v2"))
        self._add_service(kms_service.KMSService(version="v1"))
        self._add_service(maas_service.MaaSService(version="v1"))
        self._add_service(network_service.NetworkService(version="v2.0"))
        self._add_service(
            orchestration_service.OrchestrationService(version="v1"))
        self._add_service(smn_service.SMNService(version="v2"))
        # QianBiao.NG HuaWei Services
        self._add_service(dns_service.DNSService(version="v2"))
        self._add_service(cloud_eye_service.CloudEyeService(version="v1"))
        ass = auto_scaling_service.AutoScalingService(version="v1")
        self._add_service(ass)
        vbs_v2 = volume_backup_service.VolumeBackupService(version="v2")
        self._add_service(vbs_v2)
        self._add_service(map_reduce_service.MapReduceService(version="v1"))
        self._add_service(evs_service.EvsServiceV2_1(version='v2.1'))
        self._add_service(evs_service.EvsService(version='v2'))
        self._add_service(ecs_service.EcsService(version='v1'))
        self._add_service(ecs_service.EcsServiceV1_1(version='v1.1'))
        self._add_service(vpc_service.VpcService(version='v2.0'))
        self._add_service(bms_service.BmsService(version='v1'))
        self._add_service(lb_service.LoadBalancerService(version='v1'))
        # The services below are not supported yet
        # self._add_service(message_service.MessageService(version="v1"))
        # self._add_service(cluster_service.ClusterService(version="v1"))
        # self._add_service(database_service.DatabaseService(version="v1"))
        # self._add_service(alarm_service.AlarmService(version="v2"))
        # self._add_service(bare_metal_service.BareMetalService(version="v1"))
        # self._add_service(key_manager_service.KeyManagerService(version="v1"))
        # self._add_service(
        # object_store_service.ObjectStoreService(version="v1"))

        self._add_service(rds_service.RDSService(version="v1"))
        self._add_service(cdn_service.CDNService(version='v1'))

        # self._add_service(rds_os_service.RDSService(version="v1"))
        # self._add_service(telemetry_service.TelemetryService(version="v2"))
        # self._add_service(workflow_service.WorkflowService(version="v2"))
        if plugins:
            for plugin in plugins:
                self._load_plugin(plugin)
        self.service_keys = sorted(self._services.keys())
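For orientation, a minimal usage sketch of the constructor above, assuming the surrounding class is the fork's :class:`~openstack.profile.Profile` (as its own docstring states) and is importable as shown; nothing here beyond `Profile` and `service_keys` comes from the snippet:

# Hedged sketch: construct a profile with the default service set.
from openstack import profile

prof = profile.Profile()
# All registered service types are now listed, e.g. 'map-reduce'.
print(prof.service_keys)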
Code example #2
class DataSource(resource.Resource):
    resource_key = "data_source"
    resources_key = "data_sources"
    base_path = "/data-sources"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_create = True
    allow_update = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters("sort_by")

    #: Properties
    #: Data Source name
    name = resource.Body("name")
    #: Data Source Type, supports: ``hdfs``, ``obs``, ``swift``
    type = resource.Body("type")
    #: Data Source url; if type is ``hdfs``, the url should look like
    #: */data-source-path*, and if type is ``obs``, it should look like
    #: *s3a://data-source-path*
    url = resource.Body("url")
    #: Data source description
    description = resource.Body("description")
    #: Reserved attribute, indicating whether the data source is protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, indicating whether the data source is public
    is_public = resource.Body("is_public", type=bool)
    #: UTC date and time when the data source was created
    created_at = resource.Body("created_at")
    #: UTC date and time when the data source was last updated
    updated_at = resource.Body("updated_at")
    #: The tenant this data-source belongs to
    tenant_id = resource.Body("tenant_id")
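A hedged sketch of building a data-source body from these fields, assuming the base ``Resource.new`` factory of the SDK generation this fork tracks; all values are placeholders:

# Sketch: construct an OBS-backed data source locally (not yet created
# on the server). The name and URL are illustrative placeholders.
ds = DataSource.new(
    name="example-input",
    type="obs",
    url="s3a://example-bucket/input",  # OBS URLs use the s3a:// scheme
    description="sample data source",
)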
Code example #3
class JobBinary(resource.Resource):
    resource_key = "job_binary"
    resources_key = "binaries"
    base_path = "/job-binaries"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_create = True
    allow_update = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters("sort_by")

    #: Properties
    #: Job Binary name
    name = resource.Body("name")
    #: Job Binary url
    url = resource.Body("url")
    #: Job Binary description
    description = resource.Body("description")
    #: Reserved attribute, indicating whether the job binary is protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, indicating whether the job binary is public
    is_public = resource.Body("is_public", type=bool)
    #: UTC date and time when the job binary was created
    created_at = resource.Body("created_at")
    #: UTC date and time when the job binary was last updated
    updated_at = resource.Body("updated_at")
    #: The tenant this job-binary belongs to
    tenant_id = resource.Body("tenant_id")
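Because ``_query_mapping`` only exposes ``sort_by``, listing accepts that single filter. A hedged sketch, assuming an authenticated ``session`` and the classic ``Resource.list(session, **params)`` classmethod of this SDK generation:

# Sketch: iterate job binaries sorted by name; `session` is assumed
# to be an authenticated SDK session.
for binary in JobBinary.list(session, sort_by="name"):
    print(binary.name, binary.url)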
Code example #4
    def test_service(self):
        sot = map_reduce_service.MapReduceService()
        self.assertEqual('map-reduce', sot.service_type)
        self.assertEqual('public', sot.interface)
        self.assertIsNone(sot.region)
        self.assertIsNone(sot.service_name)
        self.assertEqual(1, len(sot.valid_versions))
        self.assertEqual('v1', sot.valid_versions[0].module)
        self.assertEqual('v1', sot.valid_versions[0].path)
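The assertions pin down the service's defaults. A plausible reconstruction of the class under test, assuming the ``service_filter.ServiceFilter`` conventions of this SDK generation (hypothetical, not copied from the fork's source):

# Hypothetical sketch of map_reduce_service.py, consistent with the
# assertions above; the real module may differ in detail.
from openstack import service_filter


class MapReduceService(service_filter.ServiceFilter):
    """The MRS (map-reduce) service."""

    valid_versions = [service_filter.ValidVersion('v1')]

    def __init__(self, version=None):
        super(MapReduceService, self).__init__(
            service_type='map-reduce',  # matches test_service
            version=version,
        )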
Code example #5
class Version(resource.Resource):
    resource_key = 'version'
    resources_key = 'versions'
    base_path = '/'
    service = map_reduce_service.MapReduceService(
        version=map_reduce_service.MapReduceService.UNVERSIONED)

    # capabilities
    allow_list = True

    # Properties
    links = resource.Body('links')
    status = resource.Body('status')
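Since the service is constructed with ``UNVERSIONED``, requests for this resource go to the service root rather than a versioned endpoint, which is what makes version discovery possible. A hedged sketch, assuming an authenticated ``session``:

# Sketch: list the API versions exposed at the service root ("/").
for version in Version.list(session):
    print(version.status, version.links)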
Code example #6
class Job(resource.Resource):
    resource_key = "job"
    resources_key = "jobs"
    base_path = "/jobs"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_create = True
    allow_update = True
    patch_update = True
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters("sort_by")

    #: Properties
    #: Job name
    name = resource.Body("name")
    #: Job type, supports: ``MapReduce``, ``Spark``, ``Hive``, ``hql``,
    #: ``DistCp``, ``SparkScript``, ``SparkSql``
    type = resource.Body("type")
    #: A list of programs to be executed by the job
    mains = resource.Body("mains", type=list)
    #: A list of job-binaries required by the job
    libs = resource.Body("libs", type=list)
    #: Reserved attribute, user custom interfaces
    interface = resource.Body("interface", type=list)
    #: Job description
    description = resource.Body("description")
    #: Reserved attribute, indicating whether the job is protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, indicating whether the job is public
    is_public = resource.Body("is_public", type=bool)
    #: UTC date and time when the job was created
    created_at = resource.Body("created_at")
    #: UTC date and time when the job was last updated
    updated_at = resource.Body("updated_at")
    #: The tenant this job belongs to
    tenant_id = resource.Body("tenant_id")
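A hedged sketch of defining a job from these fields, again via the assumed base ``Resource.new`` factory; the job-binary IDs are placeholders:

# Sketch: define a Spark job locally. `main_binary_id` and `lib_id`
# are placeholder job-binary IDs, not values from the snippet above.
job = Job.new(
    name="example-spark-job",
    type="Spark",
    mains=[main_binary_id],
    libs=[lib_id],
    description="sample job definition",
)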
Code example #7
File: cluster.py  Project: chengff1/huawei-python-sdk
class Cluster(resource.Resource):
    """Cluster Resource"""
    resource_key = "cluster"
    resources_key = "cluster"
    base_path = "/clusters"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_create = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters("sort_by", marker="limit")

    #: Properties
    #: Cluster ID
    id = resource.Body("cluster_id")
    #: Cluster name
    name = resource.Body("cluster_name")
    #: Version of the cluster. Currently, MRS 1.2 and MRS 1.3.0 are
    #: supported; the latest version (MRS 1.3.0 for now) is used by default.
    version = resource.Body("cluster_version")
    #: Cluster type: ``0`` indicates an analysis cluster and ``1`` a
    #: streaming cluster. ``0`` is used by default.
    type = resource.Body("cluster_type")

    #: Cluster billing type. The value is 12, indicating on-demand payment.
    billing_type = resource.Body("billing_type", type=int, default=12)
    #: Number of Master nodes, fixed at 2
    master_node_num = resource.Body("master_node_num", type=int, default=2)
    #: The flavor of the Master node; the best match is based on several
    #: years of commissioning experience. MRS supports nine host
    #: specifications, determined by CPU, memory, and disk.
    #: Master nodes support:
    #:  - c2.4xlarge.linux.mrs,
    #:  - s1.4xlarge.linux.mrs and
    #:  - s1.8xlarge.linux.mrs.
    #: Core nodes of a streaming cluster support:
    #:  - s1.xlarge.linux.mrs,
    #:  - c2.2xlarge.linux.mrs,
    #:  - c2.4xlarge.linux.mrs,
    #:  - s1.4xlarge.linux.mrs,
    #:  - s1.8xlarge.linux.mrs,
    #:  - d1.8xlarge.linux.mrs
    #: Core nodes of an analysis cluster support all specifications above.
    master_node_size = resource.Body("master_node_size")
    #: Number of Core nodes; value range: 3 to 100
    core_node_num = resource.Body("core_node_num", type=int)
    #: Instance specification of a Core node. The configuration method of
    #: this parameter is identical to that of master_node_size.
    core_node_size = resource.Body("core_node_size")

    #: Cluster region information; obtain the value from
    #: https://docs.otc.t-systems.com/en-us/endpoint/index.html
    data_center = resource.Body("data_center")
    #: ID of an availability zone; obtain the value from
    #: https://docs.otc.t-systems.com/en-us/endpoint/index.html
    availability_zone_id = resource.Body("available_zone_id")

    #: VPC reference of cluster nodes networking
    vpc_id = resource.Body("vpc_id")
    #: Name of the VPC
    vpc_name = resource.Body("vpc")
    #: Subnet reference of cluster nodes networking
    subnet_id = resource.Body("subnet_id")
    #: Name of the subnet
    subnet_name = resource.Body("subnet_name")

    #: Type of volume, ``SATA``, ``SAS`` and ``SSD`` are supported.
    #:  - SATA: common I/O
    #:  - SAS: high-speed I/O
    #:  - SSD: super high-speed I/O
    volume_type = resource.Body("volume_type")
    #: Data volume size of a Core node; value range: 100 GB to 32000 GB.
    #: Users can add disks to expand storage capacity when creating a cluster.
    #: There are the following scenarios:
    #: - Separation of data storage and computing: Data is stored in the
    #: OBS system. Costs of clusters are relatively low but computing
    #: performance is poor. The clusters can be deleted at any time. It is
    #: recommended when data computing is not frequently performed.
    #: - Integration of data storage and computing: Data is stored in the HDFS
    #: system. Costs of clusters are relatively high but computing performance
    #: is good. The clusters cannot be deleted in a short term.
    #: It is recommended when data computing is frequently performed.
    volume_size = resource.Body("volume_size")
    #: Name of a key pair used to log in to the Master node of the cluster.
    keypair = resource.Body("node_public_cert_name")
    #: MRS cluster running mode: ``0`` indicates ``Common Mode`` and ``1``
    #: ``Safe Mode``.
    #: - 0: common mode. Kerberos authentication is disabled and users can
    #: use all functions provided by the cluster.
    #: - 1: safe mode. Kerberos authentication is enabled; common users
    #: cannot use the file management or job management functions of an MRS
    #: cluster and cannot view cluster resource usage or the job records of
    #: Hadoop and Spark. To use these functions, they must obtain the
    #: relevant permissions from the MRS Manager administrator.
    safe_mode = resource.Body("safe_mode")
    #: Indicates the password of the MRS Manager administrator.
    cluster_admin_secret = resource.Body("cluster_admin_secret")
    #: Service component list to be used by the cluster.
    #: Component IDs supported by 1.3.0 include:
    #: - MRS 1.3.0_001: Hadoop
    #: - MRS 1.3.0_002: Spark
    #: - MRS 1.3.0_003: HBase
    #: - MRS 1.3.0_004: Hive
    #: - MRS 1.3.0_005: Hue
    #: - MRS 1.3.0_006: Kafka
    #: - MRS 1.3.0_007: Storm
    #: Component IDs supported by MRS 1.2 include:
    #: - MRS 1.2_001: Hadoop
    #: - MRS 1.2_002: Spark
    #: - MRS 1.2_003: HBase
    #: - MRS 1.2_004: Hive
    #: - MRS 1.2_005: Hue
    component_list = resource.Body("component_list", type=list)
    #: Jobs to be executed after the cluster is ready
    jobs = resource.Body("add_jobs", type=list)
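A hedged sketch of assembling a minimal creation body from the documented fields. Every value is a placeholder (the IDs must come from your VPC, subnet, and the endpoint documentation linked above), and the ``component_list`` entry format is an assumption:

# Sketch: minimal streaming-cluster request body; all values are
# illustrative placeholders.
cluster = Cluster.new(
    name="example-cluster",
    version="MRS 1.3.0",
    type=1,                                # 1 = streaming
    master_node_num=2,                     # fixed at 2
    master_node_size="s1.4xlarge.linux.mrs",
    core_node_num=3,                       # minimum of the 3..100 range
    core_node_size="s1.xlarge.linux.mrs",
    data_center="example-region",
    availability_zone_id="example-az-id",
    vpc_id="example-vpc-id",
    vpc_name="example-vpc",
    subnet_id="example-subnet-id",
    subnet_name="example-subnet",
    volume_type="SATA",
    volume_size=100,                       # GB, lower bound of the range
    keypair="example-keypair",
    safe_mode=0,                           # common mode
    component_list=[{"component_id": "MRS 1.3.0_002"}],  # Spark; format assumed
)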
Code example #8
File: cluster.py  Project: chengff1/huawei-python-sdk
class ClusterDetail(resource.Resource):
    """Cluster Detail Resource"""
    resource_key = "cluster"
    resources_key = "cluster"
    base_path = "/cluster_infos"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_get = True

    #: Properties
    #: Cluster ID
    id = resource.Body("clusterId")
    #: Cluster name
    name = resource.Body("clusterName")
    #: Version of the cluster. Currently, MRS 1.2 and MRS 1.3.0 are
    #: supported; the latest version (MRS 1.3.0 for now) is used by default.
    version = resource.Body("clusterVersion")
    #: Cluster type: ``0`` indicates an analysis cluster and ``1`` a
    #: streaming cluster. ``0`` is used by default.
    type = resource.Body("clusterType")
    #: Cluster state
    state = resource.Body("clusterState")

    #: Cluster billing type. The value is 12, indicating on-demand payment.
    billing_type = resource.Body("billingType", default=12)
    #: Number of Master nodes, fixed at 2
    master_node_num = resource.Body("masterNodeNum", type=int, default=2)
    #: The flavor of the Master node; the best match is based on several
    #: years of commissioning experience. MRS supports nine host
    #: specifications, determined by CPU, memory, and disk.
    #: Master nodes support:
    #:  - c2.4xlarge.linux.mrs,
    #:  - s1.4xlarge.linux.mrs and
    #:  - s1.8xlarge.linux.mrs.
    #: Core nodes of a streaming cluster support:
    #:  - s1.xlarge.linux.mrs,
    #:  - c2.2xlarge.linux.mrs,
    #:  - c2.4xlarge.linux.mrs,
    #:  - s1.4xlarge.linux.mrs,
    #:  - s1.8xlarge.linux.mrs,
    #:  - d1.8xlarge.linux.mrs
    #: Core nodes of an analysis cluster support all specifications above.
    master_node_size = resource.Body("masterNodeSize")
    #: Number of Core nodes; value range: 3 to 100
    core_node_num = resource.Body("coreNodeNum", type=int)
    #: Instance specification of a Core node. The configuration method of
    #: this parameter is identical to that of master_node_size.
    core_node_size = resource.Body("coreNodeSize")

    #: Cluster region information; obtain the value from
    #: https://docs.otc.t-systems.com/en-us/endpoint/index.html
    data_center = resource.Body("dataCenter")
    #: ID of an availability zone; obtain the value from
    #: https://docs.otc.t-systems.com/en-us/endpoint/index.html
    availability_zone_id = resource.Body("azId")
    #: Name of the availability zone
    availability_zone = resource.Body("azName")

    #: Name of the VPC
    vpc_name = resource.Body("vpc")
    #: Name of the subnet
    subnet_name = resource.Body("subnetName")

    #: Type of volume, ``SATA``, ``SAS`` and ``SSD`` are supported.
    #:  - SATA: common I/O
    #:  - SAS: high-speed I/O
    #:  - SSD: super high-speed I/O
    volume_type = resource.Body("volumeType")
    #: Data volume size of a Core node; value range: 100 GB to 32000 GB.
    #: Users can add disks to expand storage capacity when creating a cluster.
    #: There are the following scenarios:
    #: - Separation of data storage and computing: Data is stored in the
    #: OBS system. Costs of clusters are relatively low but computing
    #: performance is poor. The clusters can be deleted at any time. It is
    #: recommended when data computing is not frequently performed.
    #: - Integration of data storage and computing: Data is stored in the HDFS
    #: system. Costs of clusters are relatively high but computing performance
    #: is good. The clusters cannot be deleted in a short term.
    #: It is recommended when data computing is frequently performed.
    volume_size = resource.Body("volumeSize")
    #: Name of a key pair used to log in to the Master node of the cluster.
    keypair = resource.Body("nodePublicCertName")
    #: MRS cluster running mode: ``0`` indicates ``Common Mode`` and ``1``
    #: ``Safe Mode``.
    #: - 0: common mode. Kerberos authentication is disabled and users can
    #: use all functions provided by the cluster.
    #: - 1: safe mode. Kerberos authentication is enabled; common users
    #: cannot use the file management or job management functions of an MRS
    #: cluster and cannot view cluster resource usage or the job records of
    #: Hadoop and Spark. To use these functions, they must obtain the
    #: relevant permissions from the MRS Manager administrator.
    safe_mode = resource.Body("safeMode")
    #: Service component list to be used by the cluster.
    #: Component IDs supported by 1.3.0 include:
    #: - MRS 1.3.0_001: Hadoop
    #: - MRS 1.3.0_002: Spark
    #: - MRS 1.3.0_003: HBase
    #: - MRS 1.3.0_004: Hive
    #: - MRS 1.3.0_005: Hue
    #: - MRS 1.3.0_006: Kafka
    #: - MRS 1.3.0_007: Storm
    #: Component IDs supported by MRS 1.2 include:
    #: - MRS 1.2_001: Hadoop
    #: - MRS 1.2_002: Spark
    #: - MRS 1.2_003: HBase
    #: - MRS 1.2_004: Hive
    #: - MRS 1.2_005: Hue
    component_list = resource.Body("componentList", type=list)

    create_at = resource.Body("createAt")
    update_at = resource.Body("updateAt")
    duration = resource.Body("duration")
    fee = resource.Body("fee")
    hadoop_version = resource.Body("hadoopVersion")
    external_ip = resource.Body("externalIp")
    external_alternate_ip = resource.Body("externalAlternateIp")
    internal_ip = resource.Body("internalIp")
    deployment_id = resource.Body("deploymentId")
    remark = resource.Body("remark")
    order_id = resource.Body("orderId")
    master_node_product_id = resource.Body("masterNodeProductId")
    master_node_spec_id = resource.Body("masterNodeSpecId")
    core_node_product_id = resource.Body("coreNodeProductId")
    core_node_spec_id = resource.Body("coreNodeSpecId")
    instance_id = resource.Body("instanceId")
    vnc = resource.Body("vnc")
    tenant_id = resource.Body("tenantId")
    security_groups_id = resource.Body("securityGroupsId")
    slave_security_groups_id = resource.Body("slaveSecurityGroupsId")
    master_node_ip = resource.Body("masterNodeIp")
    private_ip_first = resource.Body("privateIpFirst")
    error_info = resource.Body("errorInfo")
    charging_start_time = resource.Body("chargingStartTime")
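Note that the detail API returns camelCase keys (``clusterId``, ``masterNodeNum``, ...), which these ``resource.Body`` declarations map back to the snake_case attribute names used by the other resources. A hedged fetch sketch, assuming an authenticated ``session`` and the classic ``Resource.get(session)`` method:

# Sketch: fetch one cluster's detail; the ID is a placeholder.
detail = ClusterDetail.new(id="example-cluster-id")
detail.get(session)
print(detail.name, detail.state, detail.master_node_ip)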
Code example #9
class JobExecution(resource.Resource):
    """Map Reduce Job Execution Resource"""
    resource_key = "job_execution"
    resources_key = "job_executions"
    base_path = "/job-executions"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters("sort_by")

    #: Properties
    #: A dict contains job running information returned by Oozie
    info = resource.Body("info", type=dict)
    #: The cluster which executed the job
    cluster_id = resource.Body("cluster_id")
    #: Reference (ID) of the job being executed
    job_id = resource.Body("job_id")
    #: Workflow ID of Oozie
    engine_job_id = resource.Body("engine_job_id")
    #: Workflow ID returned by Oozie
    oozie_job_id = resource.Body("oozie_job_id")
    #: Response code of job execution
    return_code = resource.Body("return_code")
    #: Input data reference(ID) of the job execution
    input_id = resource.Body("input_id")
    #: Output data reference(ID) of the job execution
    output_id = resource.Body("output_id")
    #: Job execution configurations
    job_configs = resource.Body("job_configs", type=dict)
    #: Input data source dict of the job execution; keys are input IDs and
    #: values are the input URLs
    data_source_urls = resource.Body("data_source_urls")
    #: Reserved attribute, indicating whether the job execution is protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, indicating whether the job execution is public
    is_public = resource.Body("is_public", type=bool)
    #: UTC date and time when the job execution started
    start_time = resource.Body("start_time")
    #: UTC date and time when the job execution ended
    end_time = resource.Body("end_time")
    #: UTC date and time when the job execution was created
    created_at = resource.Body("created_at")
    #: UTC date and time when the job execution was last updated
    updated_at = resource.Body("updated_at")
    #: The tenant this job-execution belongs to
    tenant_id = resource.Body("tenant_id")

    def cancel(self, session):
        """Cancel this job execution.

        :param session: openstack session
        :returns: this instance, updated from the response
        """
        uri = utils.urljoin(self.base_path, self.id, 'cancel')
        endpoint_override = self.service.get_endpoint_override()
        response = session.get(uri,
                               endpoint_filter=self.service,
                               endpoint_override=endpoint_override)
        self._translate_response(response)
        return self

    def create(self, session):
        """Create a job execution and run it.

        :param session: openstack session
        :returns: this instance, updated from the response
        """
        uri = utils.urljoin("/jobs", self.job_id, '/execute')
        endpoint_override = self.service.get_endpoint_override()
        body = self._body.dirty
        response = session.post(uri,
                                headers={"Accept": "application/json"},
                                endpoint_filter=self.service,
                                endpoint_override=endpoint_override,
                                json=body)
        self._translate_response(response)
        return self
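Putting the two overridden methods together, a hedged end-to-end sketch; the IDs are placeholders, ``session`` is an authenticated session, and the ``job_configs`` payload shape is assumed:

# Sketch: run a stored job on a cluster, then cancel the execution.
execution = JobExecution.new(
    job_id="example-job-id",
    cluster_id="example-cluster-id",
    job_configs={"args": []},  # payload shape assumed
)
execution.create(session)  # POST /jobs/{job_id}/execute
execution.cancel(session)  # GET /job-executions/{id}/cancel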
Code example #10
File: job_exe.py  Project: chengff1/huawei-python-sdk
class JobExe(resource.Resource):
    """Map Reduce Job Exe Resource

    JobExe is not the same as :class: `~openstack.map_reduce.v1.job_execution.
    JobExecution`, It's an older version of job execution, we just implement
    it for backward compatible
    """
    resource_key = "job_execution"
    resources_key = "job_executions"
    base_path = "/job-exes"
    query_marker_key = "current_page"
    query_limit_key = "page_size"
    service = map_reduce_service.MapReduceService()

    # capabilities
    allow_list = True
    allow_get = True
    allow_delete = True

    _query_mapping = resource.QueryParameters("id", "cluster_id", "job_name",
                                              "state", "page_size",
                                              "current_page")

    #: Properties
    #: Whether the job is generated from a template
    templated = resource.Body("templated", type=bool)
    #: The cluster which executed the job
    cluster_id = resource.Body("cluster_id")
    #: ID of the job being executed
    job_id = resource.Body("job_id")
    #: Name of the job being executed
    job_name = resource.Body("job_name")
    #: The job type to be executed, valid values include:
    #: - 1: MapReduce
    #: - 2: Spark
    #: - 3: Hive Script
    #: - 4: HiveQL
    #: - 5: DistCp
    #: - 6: Spark Script
    #: - 7: Spark SQL
    job_type = resource.Body("job_type")
    #: Input data reference (ID) of the job execution
    input_id = resource.Body("input_id")
    #: Output data reference (ID) of the job execution
    output_id = resource.Body("output_id")
    #: Input data URL of the job execution
    input = resource.Body("input")
    #: Output data URL of the job execution
    output = resource.Body("output")
    #: Execute return code
    return_code = resource.Body("return_code")
    #: Oozie workflow id
    engine_job_id = resource.Body("engine_job_id")
    #: The job execution group id
    group_id = resource.Body("group_id")
    #: Path of the JAR file to be executed
    jar_path = resource.Body("jar_path")
    #: The job log path
    job_log = resource.Body("job_log")
    #: File action, e.g. ``import`` or ``export``
    file_action = resource.Body("file_action")
    #: Key parameter for program execution. The parameter is specified by the
    #: function of the user's internal program. MRS is only responsible for
    #: loading the parameter. This parameter can be empty.
    arguments = resource.Body("arguments")
    #: HiveQL statement
    hql = resource.Body("hql")
    #: Job status code, valid values include:
    #: - -1: Terminated
    #: - 1: Starting
    #: - 2: Running
    #: - 3: Completed
    #: - 4: Abnormal
    #: - 5: Error
    job_state = resource.Body("job_state")
    #: Job final status, valid values include:
    #: - 0: unfinished
    #: - 1: terminated due to an execution error
    #: - 2: executed successfully
    #: - 3: canceled
    job_final_status = resource.Body("job_final_status")
    #: Address of the Hive script
    hive_script_path = resource.Body("hive_script_path")
    #: User ID for creating jobs
    create_by = resource.Body("create_by")
    #: User ID for updating jobs
    update_by = resource.Body("update_by")
    #: Number of completed steps
    finished_step = resource.Body("finished_step")
    #: Main ID of a job
    job_main_id = resource.Body("job_main_id")
    #: Step ID of a job
    job_step_id = resource.Body("job_step_id")
    #: Delay time, a 13-digit (millisecond) timestamp.
    postpone_at = resource.Body("postpone_at")
    #: Step name of a job
    step_name = resource.Body("step_name")
    #: Number of steps
    step_num = resource.Body("step_num")
    #: Number of tasks
    task_num = resource.Body("task_num")
    #: Duration of job execution (unit: s)
    spend_time = resource.Body("spend_time")
    #: Step sequence of a job
    step_seq = resource.Body("step_seq")
    #: Job execution progress
    progress = resource.Body("progress")
    #: Authentication token, not supported for now
    credentials = resource.Body("credentials")
    #: ID of the user who created the job execution
    user_id = resource.Body("user_id")
    #: Key-value pair set for saving job running configurations
    job_configs = resource.Body("job_configs")
    #: Authentication information
    extra = resource.Body("extra")
    #: Data source URL of a job
    data_source_urls = resource.Body("data_source_urls")
    #: Key-value pair set, containing job running information returned by Oozie
    info = resource.Body("info")
    #: UTC date and time when the job execution started
    start_time = resource.Body("start_time")
    #: UTC date and time when the job execution ended
    end_time = resource.Body("end_time")
    #: UTC date and time when the job execution was created
    create_at = resource.Body("create_at")
    #: UTC date and time when the job execution was last updated
    update_at = resource.Body("update_at")
    #: UTC date and time when the job execution was created
    created_at = resource.Body("created_at")
    #: UTC date and time when the job execution was last updated
    updated_at = resource.Body("updated_at")
    #: The tenant this job-execution belongs to
    tenant_id = resource.Body("tenant_id")
    #: Reserved attribute, indicating whether the job execution is protected
    is_protected = resource.Body("is_protected", type=bool)
    #: Reserved attribute, indicating whether the job execution is public
    is_public = resource.Body("is_public", type=bool)

    @classmethod
    def get_next_marker(cls, response_json, yielded, query_params):
        # A page shorter than page_size means there are no more pages;
        # returning -1 stops pagination.
        page_size = query_params.get("page_size", 1)
        if yielded < page_size:
            return -1

        # Otherwise advance to the next page number.
        current_page = int(query_params.get("current_page", 1))
        return current_page + 1

    def execute(self, session):
        """Submit this job-exe for execution.

        :param session: openstack session
        :returns: this instance, updated from the response
        """
        endpoint_override = self.service.get_endpoint_override()
        body = self._body.dirty
        response = session.post("/jobs/submit-job",
                                headers={},
                                endpoint_filter=self.service,
                                endpoint_override=endpoint_override,
                                json=body)
        self._translate_response(response)
        return self
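``get_next_marker`` drives page-number pagination: a full page advances ``current_page``, while a short page returns ``-1`` to stop iteration. A small worked sketch with illustrative values:

# With page_size=10: 10 yielded items -> next page is 2; 4 yielded
# items -> -1, i.e. no further pages.
params = {"page_size": 10, "current_page": 1}
assert JobExe.get_next_marker({}, 10, params) == 2
assert JobExe.get_next_marker({}, 4, params) == -1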