Example #1
def main():
    tar_upload(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)
    batch_clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job = batch_clnt.create_job(get_job_desc())
    t = 10
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)
    try:
        while (True):
            jobId = job.JobId
            job_detail = batch_clnt.get_job(jobId)
            if job_detail.State in ['Waiting', 'Running']:
                print('Job %s is now %s' % (job, job_detail.State))
                time.sleep(10)
                #listenRes()
                continue
            else:
                # 'Failed', 'Stopped', 'Finished'
                print('Job %s is now %s' % (job, job_detail.State))
                if job_detail.State == 'Finished':
                    if oss_clnt.exists(cfg.OSS_BUCKET, cfg.REQUEST_NAME):
                        oss_clnt.download(cfg.OSS_BUCKET, "aaa.txt",
                                          cfg.REQUEST_NAME)
                    print('Finished')
                break
    except ClientError as e:
        print(e)
Example #2
def main():
    upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)

    # Submit job to batch compute.
    clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job_desc = get_job_desc(cfg.FULL_PACKAGE)
    job = clnt.create_job(job_desc)

    t = 10
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)

    # Wait for jobs terminated.
    while(True):
        s = clnt.get_job(job)
        if s.State in ['Waiting', 'Running']:
            print('Job %s is now %s'%(job, s.State))
            time.sleep(3)
            continue
        else:
            # 'Failed', 'Stopped', 'Finished'
            print('Job %s is now %s'%(job, s.State))
            if s.State == 'Finished':
                result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.OUTPUT_PATH)
                # Print out all prime numbers from 0 to 10000.
                print(result.splitlines())
            break
    clnt.delete_job(job)
Example #3
def main():
    tar_upload(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)
    batch_clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job = batch_clnt.create_job(get_job_desc())
    if not oss_clnt.exists(cfg.OSS_BUCKET, cfg.REQUEST_NAME):
        oss_clnt.upload_str(cfg.OSS_BUCKET, "This is a test!", cfg.REQUEST_NAME)
    t = 10
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)
    try:
        while True:
            jobId = job.JobId
            job_detail = batch_clnt.get_job(jobId)
            if job_detail.State in ['Waiting', 'Running']:
                print('Job %s is now %s'%(job, job_detail.State))
                time.sleep(10)
                listenRes()
                continue
            else:
                # 'Failed', 'Stopped', 'Finished'
                print('Job %s is now %s' % (job, job_detail.State))
                if job_detail.State == 'Finished':
                    #oss_clnt.download(cfg.OSS_BUCKET,'/Users/trucy/luzj/test_ud/awq1.txt',cfg.RESPONSE_NAME)
                    print('Finished')
                break
    except ClientError as e:
        print(e)
Example #4
    def setUp(self):
        self.endpoint = CN_QINGDAO
        self.access_key_id = ""
        self.access_key_secret = ""
        self.image_id = ''

        self.client = Client(self.endpoint,
                             self.access_key_id,
                             self.access_key_secret,
                             human_readable=True)
        self.job_id = None
Example #5
def create_cluster(client: Client, cluster_desc_str):
    cluster_desc = ClusterDescription(json.loads(cluster_desc_str))
    res = get_cluster(client, None, cluster_desc.Name)
    if res:
        return res
    res = client.create_cluster(cluster_desc)
    return json.loads(str(res))
Example #6
def get_cluster(client: Client, cluster_id, cluster_name):
    if cluster_id:
        res = client.get_cluster(cluster_id)
        return json.loads(str(res))
    res = list_clusters(client)
    for cls in res:
        if cls["Name"] == cluster_name:
            return cls
    return None
Example #7
    def setUp(self):
        self.endpoint = CN_QINGDAO
        self.access_key_id = ""
        self.access_key_secret = ""
        self.image_id = ''

        self.client = Client(self.endpoint, self.access_key_id,
                        self.access_key_secret, human_readable=True)
        self.job_id = None
Example #8
def create_app(client: Client, app_desc_str):
    app_desc = AppDescription(json.loads(app_desc_str))
    try:
        res = get_app(client, app_desc.Name)
        return res
    except ClientError as e:
        if e.status != 404:
            raise e
    res = client.create_app(app_desc)
    return json.loads(str(res))
Example #9
def list_cluster_instances(client: Client, cluster_id, group_id):
    marker = ""
    instances = []
    while True:
        res = client.list_cluster_instances(cluster_id, group_id, marker, 100)
        for instance in res.Items:
            instances.append(json.loads(str(instance)))
        if res.NextMarker == "":
            break
        marker = res.NextMarker
    return instances
Example #10
def list_clusters(client: Client):
    marker = ""
    clusters = []
    while True:
        res = client.list_clusters(marker, 100)
        for cluster in res.Items:
            clusters.append(json.loads(str(cluster)))
        if res.NextMarker == "":
            break
        marker = res.NextMarker
    return clusters
Example #11
def main():
    upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)

    # Submit job to batch compute.
    clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job_desc = get_job_desc(cfg.FULL_PACKAGE)
    job = clnt.create_job(job_desc)
    t = 10
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)

    # Wait for job Terminated.
    while (True):
        s = clnt.get_job(job)
        if s.State in ['Waiting', 'Running']:
            print('Job %s is now %s' % (job, s.State))
            time.sleep(3)
            continue
        else:
            # 'Failed', 'Stopped', 'Finished'
            print('Job %s is now %s' % (job, s.State))
            if s.State == 'Finished':
                result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.OUTPUT_PATH)
                # Print out the count of prime numbers from 0 to 10000.
                print('Total %s prime numbers from %s to %s.' %
                      (result, cfg.DATA_START, cfg.DATA_END))
            break
    # Release job from batchcompute.
    clnt.delete_job(job)
Example #12
def main():
    upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)

    # Submit job to batch compute.
    clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job_json = get_job_desc(cfg.FULL_PACKAGE)
    job = clnt.create_job(job_json)

    t = 10
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)

    # Wait for jobs terminated.
    while(True):
        s = clnt.get_job(job)
        if s.State in ['Waiting', 'Running']:
            print('Job %s is now %s'%(job, s.State))
            time.sleep(3)
            continue
        else:
            # 'Failed', 'Stopped', 'Finished'
            print('Job %s is now %s'%(job, s.State))
            if s.State == 'Finished':
                oss_clnt = SimpleOss(cfg.OSS_HOST, cfg.ID, cfg.KEY)
                result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.OUTPUT_PATH)
                # Print out all prime numbers from 0 to 10000.
                print(result.splitlines())
            break
    clnt.delete_job(job)
Example #13
def main():
    upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)

    # Submit job to batch compute.
    clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job_json = get_job_desc(cfg.FULL_PACKAGE)
    job = clnt.create_job(job_json)

    t = 5
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)

    # Wait for job terminated.
    while(True):
        s = clnt.get_job(job)
        if s.State in ['Waiting']:
            print('Job %s is now %s'%(job, s.State))
            time.sleep(10)
            t+=1
            continue
        elif s.State in ['Running']:
            print("Waiting %s s" % t)
            t=0
            print('Job %s is now %s'%(job, s.State))
            time.sleep(10)
            t+=1
        else:
            # 'Failed', 'Stopped', 'Finished'
            print('Job %s is now %s'%(job, s.State))
            print "running%s s"%t
            if s.State == 'Finished':
                # Print out total prime numbers from 0 to 10000.
                result = oss_clnt.download_str(cfg.OSS_BUCKET, cfg.COUNT_OUTPUT_PATH)
                print('Total %s prime numbers from %s to %s.'%(result, 1, 10000))
            break
    clnt.delete_job(job)
Example #14
def main():
    upload_worker(cfg.OSS_BUCKET, 'worker_package', cfg.PACKAGE_PATH)

    # Submit job to batch compute.
    clnt = Client(cfg.REGION, cfg.ID, cfg.KEY)
    job_json = get_job_desc(cfg.FULL_PACKAGE)
    job = clnt.create_job(job_json)

    t = 5
    print('Sleep %s seconds, please wait.' % t)
    time.sleep(t)

    # Wait for job terminated.
    while (True):
        s = clnt.get_job(job)
        if s.State in ['Waiting']:
            print('Job %s is now %s' % (job, s.State))
            time.sleep(10)
            t += 1
            continue
        elif s.State in ['Running']:
            print "Waiting%s s" % t
            t = 0
            print('Job %s is now %s' % (job, s.State))
            time.sleep(10)
            t += 1
        else:
            # 'Failed', 'Stopped', 'Finished'
            print('Job %s is now %s' % (job, s.State))
            print "running%s s" % t
            if s.State == 'Finished':
                # Print out total prime numbers from 0 to 10000.
                result = oss_clnt.download_str(cfg.OSS_BUCKET,
                                               cfg.COUNT_OUTPUT_PATH)
                print('Total %s prime numbers from %s to %s.' %
                      (result, 1, 10000))
            break
    clnt.delete_job(job)
Example #15
def create_job(client: Client, job_desc_str):
    print(job_desc_str)
    job_desc = AppJobDescription(json.loads(job_desc_str))
    res = client.create_job(job_desc)
    return json.loads(str(res))
    },
    "Dependencies": {}
  },
  "Notification": {
    "Topic": {
      "Name": "",
      "Events": []
    }
  }
}"""

# Load config values
accid, acckey, endpoint, token = MNSCommon.LoadConfig()

# Instantiate the batch compute client
client = Client(REGION, accid, acckey)

# my_account, my_queue
my_account = Account(endpoint, accid, acckey, token)
if len(sys.argv) > 1:
 queue_name = sys.argv[1]
else:
 sys.stderr.write("Please specify queue name while executing the script. (python recvdelmessage.py <queue name>)")

base64 = False if len(sys.argv) > 2 and sys.argv[2].lower() == "false" else True
my_queue = my_account.get_queue(queue_name)
my_queue.set_encoding(base64)


# Receive and delete messages until the queue is empty.
# The receive-message request uses long polling; the polling time is 3 seconds, set via wait_seconds.
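# The receive/delete loop itself is not shown above; the following is only a
# minimal sketch of such a loop, assuming the MNS Python SDK's
# Queue.receive_message / delete_message interface and its "MessageNotExist"
# error type (these details are assumptions, not part of the original example).
from mns.mns_exception import MNSExceptionBase  # assumed MNS SDK import

wait_seconds = 3
while True:
    try:
        # Long-poll for up to wait_seconds for the next message.
        recv_msg = my_queue.receive_message(wait_seconds)
        print("Received message %s: %s" % (recv_msg.message_id, recv_msg.message_body))
    except MNSExceptionBase as e:
        if e.type == "MessageNotExist":
            # The queue is empty; stop polling.
            break
        raise
    # Delete the message so it is not delivered again.
    my_queue.delete_message(recv_msg.receipt_handle)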
Example #17
def get_bcs_client():
    Client.register_region(REGION, ENDPOINT)

    return Client(ENDPOINT, ACCESS_ID, ACCESS_KEY, SECURITY_TOKEN)
Example #18
def get_bcs_client():
    ENDPOINT = "batchcompute.%s.aliyuncs.com" % REGION
    Client.register_region(REGION, ENDPOINT)
    return Client(ENDPOINT, ACCESS_KEY_ID, ACCESS_KEY_SECRET)
Example #19
#!/usr/bin/env python3

import argparse
import json
import uuid
import aksk

from batchcompute import Client
from batchcompute.resources import AppDescription
from batchcompute.resources import ClusterDescription
from batchcompute.resources import AppJobDescription
from batchcompute.core.exceptions import ClientError

Client.register_region("cn-north-2-gov-1",
                       "batchcompute.cn-north-2-gov-1.aliyuncs.com")

app_wps_tpl = """
{{
  "Name": "{app_name}",
  "EnvVars": {{}},
  "Description": "",
  "CommandLine": "c:/wps/wps_startup.bat",
  "VM": {{
    "ECSImageId": "img-windows-vpc"
  }},
  "Daemonize": true,
  "InputParameters": {{
    "Action": {{
      "Type": "String"
    }},
    "RequestId": {{
Example #20
def get_app(client: Client, app_name):
    res = client.get_app(app_name)
    return json.loads(str(res))
Example #21
class ClientE2ETest(unittest.TestCase):
    def setUp(self):
        self.endpoint = CN_QINGDAO
        self.access_key_id = ""
        self.access_key_secret = ""
        self.image_id = ''

        self.client = Client(self.endpoint,
                             self.access_key_id,
                             self.access_key_secret,
                             human_readable=True)
        self.job_id = None

    def _get_job_desc(self):
        job_desc = JobDescription()
        find_task = TaskDescription()
        res_desc = ResourceDescription()

        find_task.PackageUri = "oss://your-bucket/batch_python_sdk/worker.tar.gz"
        find_task.ProgramName = 'find_prime_worker.py'
        find_task.ProgramType = 'python'
        find_task.ImageId = self.image_id
        find_task.InstanceCount = 3
        find_task.EnvironmentVariables = {}
        find_task.StdoutRedirectPath = "oss://your-bucket/batch_python_sdk/logs/"
        find_task.StderrRedirectPath = "oss://your-bucket/batch_python_sdk/logs/"
        find_task.ResourceDescription = res_desc

        # Create count task.
        count_task = TaskDescription(find_task)
        count_task['InstanceCount'] = 1

        # Create task dag.
        task_dag = TaskDag()
        task_dag.add_task(task_name='Find', task=find_task)
        task_dag.add_task(task_name='Count', task=count_task)
        task_dag.Dependencies = {'Find': ['Count']}

        # count prime job description.
        job_desc.TaskDag = task_dag
        job_desc.JobName = 'PythonSDK'
        job_desc.Priority = 0
        return job_desc

    def tearDown(self):
        while True:
            if self.job_id and self.job_id in self.client.list_jobs():
                state = self.client.get_job(self.job_id).State
                if state in ['Waiting', 'Running']:
                    self.client.stop_job(self.job_id)
                else:
                    self.client.delete_job(self.job_id)
            else:
                break
        self.job_id = None

    def test_create_job(self):
        job_desc = self._get_job_desc()

        # Create jobs.
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId

        self.assertTrue(self.job_id)

    def test_stop_job(self):
        job_desc = self._get_job_desc()

        job = self.client.create_job(job_desc)
        self.job_id = job.JobId
        state = self.client.get_job(job).State
        if state in ['Waiting', 'Running']:
            self.client.stop_job(job)
        self.assertRaises(ClientError, self.client.stop_job, job)

    def test_update_priority(self):
        old_priority = 100
        new_priority = 200

        job_desc = self._get_job_desc()
        job_desc.Priority = old_priority

        # Create jobs.
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId
        status = self.client.get_job(job)
        self.assertEqual(status.Priority, old_priority)

        # update priority.
        self.assertRaises(ClientError, self.client.update_job_priority, job,
                          new_priority)
        try:
            self.client.update_job_priority(job, new_priority)
        except ClientError as e:
            code = e.get_code()
            msg = e.get_msg()
            request_id = e.get_requestid()
        else:
            self.fail('ClientError should be raised')
        status = self.client.get_job(job)
        if status.State in ['Waiting', 'Running']:
            self.client.stop_job(job)
        self.client.update_job_priority(job, new_priority)
        status = self.client.get_job(job)
        self.assertEqual(status.Priority, new_priority)

    def test_start_job(self):
        job_desc = self._get_job_desc()
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId

        self.assertRaises(ClientError, self.client.start_job, job)
        status = self.client.get_job(job)
        if status.State in ['Waiting', 'Running']:
            self.client.stop_job(job)

    def test_list_images(self):
        image_list = self.client.list_images()
        for img in image_list:
            self.assertTrue(hasattr(img, 'ImageId'))
            self.assertTrue(not hasattr(img, 'ResourceId'))
            print(img.ImageId)

    def test_list_tasks(self):
        job_desc = self._get_job_desc()
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId

        task_list = self.client.list_tasks(self.job_id)
        for task in task_list:
            print(task.TaskName)
            for instance in task.InstanceList:
                print(instance.InstanceId)
Example #22
#encoding=utf-8
import sys
from batchcompute import Client, ClientError
from batchcompute import CN_BEIJING as REGION
from batchcompute.resources import (JobDescription, TaskDescription, DAG,
                                    AutoCluster)

ACCESS_KEY_ID = ''  # Fill in your AccessKey ID
ACCESS_KEY_SECRET = ''  # Fill in your AccessKey Secret
IMAGE_ID = 'img-f5tcaqrmrllg4sjgngo00m'  # Fill in your image ID here
INSTANCE_TYPE = 'ecs.sn1.medium'  # Fill in an InstanceType supported in your region, e.g. ecs.s1.medium or ecs.sn1ne.2xlarge
LOG_PATH = 'oss://dongyu1009/blocking/logs/'  # e.g. 'oss://your-bucket/log-count/logs/'; the OSS path you created for error feedback and task output
INPUT_MOUNT = 'oss://dongyu1009/blocking/input/'
OUTPUT_MOUNT = 'oss://dongyu1009/blocking/temp2/'
client = Client(REGION, ACCESS_KEY_ID, ACCESS_KEY_SECRET)


def main(year):
    WORKER_PATH = 'oss://dongyu1009/blocking/program3.tar.gz'  # e.g. 'oss://your-bucket/log-count/log-count.tar.gz'; the OSS path of the log-count.tar.gz package you uploaded
    try:
        job_desc = JobDescription()
        # Create split task.
        compute_task = TaskDescription()
        compute_task.AutoCluster.Configs.Networks.VPC.CidrBlock = "192.168.0.0/16"
        compute_task.AutoCluster.InstanceType = INSTANCE_TYPE
        compute_task.AutoCluster.ResourceType = "OnDemand"
        compute_task.AutoCluster.ImageId = IMAGE_ID
        # compute_task.AutoCluster.Configs.Networks.VPC.VpcId = "vpc-xxyyzz"  # Set this field if you want to use your own VPC
        compute_task.Parameters.Command.CommandLine = "python lucc_combine.py"
        compute_task.Parameters.Command.PackagePath = WORKER_PATH
        compute_task.Parameters.StdoutRedirectPath = LOG_PATH
Example #23
def get_bcs_client():
    global access_key_id, access_key_secret
    REGION = "cn-beijing"
    ENDPOINT = "batchcompute-vpc.%s.aliyuncs.com" % REGION
    Client.register_region(REGION, ENDPOINT)
    return Client(ENDPOINT, access_key_id, access_key_secret)
Example #24
def main():
    parser = argparse.ArgumentParser(formatter_class=lambda prog: argparse.
                                     RawTextHelpFormatter(prog, width=200),
                                     description="""example:
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a ListClusters | jq ".[] | \\"\\(.Id) \\(.Name)\\""
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a GetCluster --cluster-id cluster-id | jq ".Groups.workers | \\"\\(.ActualVMCount) \\(.InstanceType)\\""
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a GetCluster --cluster-name IMM_DEV_CLUSTER_CONVERT_WPS_default_20200408_hl | jq .
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a ListClusterInstances --cluster-id cluster-id --group-id group-id
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a GetApp --app-name IMM_DEV_APP_CONVERT_WPS_default_20200520_regression
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a CreateAppWPS --app-name IMM_DEV_APP_CONVERT_WPS_default_20200920_hl
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a CreateClusterWPS \\
    --cluster-name IMM_DEV_CLUSTER_CONVERT_WPS_default_20200920_hl \\
    --vpc-id=vpc-id \\
    --mount-entry-source=oss://imm-dev-cn-shanghai/zyb/pkgs/CONVERT/WPS/ \\
    --version=20200520 \\
    --vm-count=1
  python3 bc.py -c ~/.aksk/imm-dev -r cn-shanghai -a CreateJobWPS \\
    --app-name IMM_DEV_APP_CONVERT_WPS_default_20200520_hl \\
    --cluster-name IMM_DEV_CLUSTER_CONVERT_WPS_default_20200520_hl \\
    --parameters "{
  \\"TgtType\\": \\"vector\\",
  \\"TgtUri\\": \\"oss://imm-dev-hl-shanghai/1.docx/imm/vector\\",
  \\"SrcUri\\": \\"oss://imm-dev-hl-shanghai/1.docx\\",
  \\"StartPage\\": 1,
  \\"EndPage\\": 200,
  \\"MaxSheetRow\\": 1000,
  \\"MaxSheetCol\\": 100
}"
""")
    parser.add_argument("-i", "--access-key-id", help="access key id")
    parser.add_argument("-s", "--access-key-secret", help="access key secret")
    parser.add_argument("-c", "--credential", help="credential file")
    parser.add_argument("-r", "--region-id", help="region id")
    parser.add_argument("-a",
                        "--action",
                        help="action",
                        choices=[
                            "ListClusters", "GetCluster",
                            "ListClusterInstances", "GetApp", "CreateApp",
                            "CreateAppWPS", "CreateCluster",
                            "CreateClusterWPS", "CreateJobWPS"
                        ])
    parser.add_argument("--cluster-id", help="cluster id")
    parser.add_argument("--group-id", help="group id")
    parser.add_argument("--app-name", help="app name")
    parser.add_argument("--app-desc", help="app desc")
    parser.add_argument("--cluster-name", help="cluster name")
    parser.add_argument("--cluster-desc", help="cluster desc")
    parser.add_argument("--vpc-id", help="vpc id")
    parser.add_argument("--mount-entry-source", help="mount entry source")
    parser.add_argument("--version", help="version")
    parser.add_argument("--vm-count", help="vm count")
    parser.add_argument("--parameters", help="parameters")
    parser.add_argument("--instance-type",
                        default="ecs.sn2.medium",
                        help="instance type")
    args = parser.parse_args()
    if args.credential:
        args.access_key_id, args.access_key_secret = aksk.load_from_file(
            args.credential)
    client = Client("batchcompute.{}.aliyuncs.com".format(args.region_id),
                    args.access_key_id, args.access_key_secret)
    if args.action == "ListClusters":
        print(json.dumps(list_clusters(client)))
    elif args.action == "GetCluster":
        print(
            json.dumps(get_cluster(client, args.cluster_id,
                                   args.cluster_name)))
    elif args.action == "ListClusterInstances":
        print(
            json.dumps(
                list_cluster_instances(client, args.cluster_id,
                                       args.group_id)))
    elif args.action == "GetApp":
        print(json.dumps(get_app(client, args.app_name)))
    elif args.action == "CreateApp":
        print(json.dumps(create_app(client, args.app_desc)))
    elif args.action == "CreateCluster":
        print(json.dumps(create_cluster(client, args.cluster_desc)))
    elif args.action == "CreateAppWPS":
        print(json.dumps(create_app_wps(client, args.app_name)))
    elif args.action == "CreateClusterWPS":
        print(
            json.dumps(
                (create_cluster_wps(client, args.cluster_name, args.vpc_id,
                                    args.mount_entry_source, args.version,
                                    args.vm_count, args.instance_type))))
    elif args.action == "CreateJobWPS":
        print(
            json.dumps(
                create_job_wps(client, args.app_name, args.cluster_name,
                               args.parameters)))
    else:
        parser.print_help()
Example #25
class ClientE2ETest(unittest.TestCase):
    def setUp(self):
        self.endpoint = CN_QINGDAO
        self.access_key_id = ""
        self.access_key_secret = ""
        self.image_id = ''

        self.client = Client(self.endpoint, self.access_key_id,
                        self.access_key_secret, human_readable=True)
        self.job_id = None

    def _get_job_desc(self):
        job_desc = JobDescription()
        find_task = TaskDescription()
        res_desc = ResourceDescription()

        find_task.PackageUri = "oss://your-bucket/batch_python_sdk/worker.tar.gz"
        find_task.ProgramName = 'find_prime_worker.py'
        find_task.ProgramType = 'python'
        find_task.ImageId = self.image_id
        find_task.InstanceCount = 3
        find_task.EnvironmentVariables = {}
        find_task.StdoutRedirectPath = "oss://your-bucket/batch_python_sdk/logs/"
        find_task.StderrRedirectPath = "oss://your-bucket/batch_python_sdk/logs/"
        find_task.ResourceDescription = res_desc

        # Create count task. 
        count_task = TaskDescription(find_task)
        count_task['InstanceCount'] = 1

        # Create task dag.
        task_dag = TaskDag()
        task_dag.add_task(task_name='Find', task=find_task)
        task_dag.add_task(task_name='Count', task=count_task)
        task_dag.Dependencies = {
            'Find': ['Count']
        }

        # count prime job description.
        job_desc.TaskDag = task_dag
        job_desc.JobName = 'PythonSDK'
        job_desc.Priority = 0
        return job_desc

    def tearDown(self):
        while True:
            if self.job_id and self.job_id in self.client.list_jobs():
                state = self.client.get_job(self.job_id).State
                if state in ['Waiting', 'Running']:
                    self.client.stop_job(self.job_id)
                else:
                    self.client.delete_job(self.job_id)
            else:
                break
        self.job_id = None


    def test_create_job(self):
        job_desc = self._get_job_desc()

        # Create jobs.
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId

        self.assertTrue(self.job_id)

    def test_stop_job(self):
        job_desc = self._get_job_desc()

        job = self.client.create_job(job_desc)
        self.job_id = job.JobId
        state = self.client.get_job(job).State
        if state in ['Waiting', 'Running']:
            self.client.stop_job(job)
        self.assertRaises(ClientError, self.client.stop_job, job)

    def test_update_priority(self):
        old_priority = 100
        new_priority = 200

        job_desc = self._get_job_desc()
        job_desc.Priority = old_priority

        # Create jobs.
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId
        status = self.client.get_job(job)
        self.assertEqual(status.Priority, old_priority)

        # update priority.
        self.assertRaises(ClientError, self.client.update_job_priority, job, new_priority)
        try:
            self.client.update_job_priority(job, new_priority)
        except ClientError as e:
            code = e.get_code()
            msg = e.get_msg()
            request_id = e.get_requestid()
        else:
            self.fail('ClientError should be raised')
        status = self.client.get_job(job)
        if status.State in ['Waiting', 'Running']:
            self.client.stop_job(job)
        self.client.update_job_priority(job, new_priority)
        status = self.client.get_job(job)
        self.assertEqual(status.Priority, new_priority)

    def test_start_job(self):
        job_desc = self._get_job_desc()
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId

        self.assertRaises(ClientError, self.client.start_job, job)
        status = self.client.get_job(job)
        if status.State in ['Waiting', 'Running']:
            self.client.stop_job(job)

    def test_list_images(self):
        image_list = self.client.list_images()
        for img in image_list:
            self.assertTrue(hasattr(img, 'ImageId'))
            self.assertTrue(not hasattr(img, 'ResourceId'))
            print(img.ImageId)

    def test_list_tasks(self):
        job_desc = self._get_job_desc()
        job = self.client.create_job(job_desc)
        self.job_id = job.JobId

        task_list = self.client.list_tasks(self.job_id)
        for task in task_list:
            print(task.TaskName)
            for instance in task.InstanceList:
                print(instance.InstanceId)