Example #1
def create_data_source_variable(cluster_id, cr):
    """
    Creates a data source variable file using the cluster_id of an EMR cluster
    @PARAM:  cluster_id:  ID of an EMR cluster
    return:  True if successful; creates the file 'emr_default.conf' in the pwd

    Object created should look like:

    HADOOP_DATA_SOURCE_NAME="emr_data_source"
    HADOOP_DATA_SOURCE_DISTRO="Cloudera CDH5.4-5.7"
    HADOOP_DATA_SOURCE_HOST="emr_master_dns_hostname"
    HADOOP_DATA_SOURCE_PORT=8020
    HADOOP_DATA_SOURCE_USER="******"
    HADOOP_DATA_SOURCE_GROUP="hadoop"
    HADOOP_DATA_SOURCE_JT_HOST="emr_master_dns_hostname"
    HADOOP_DATA_SOURCE_JT_PORT=8032
    CONNECTION_PARAMETERS='[{"key":"mapreduce.jobhistory.address", "value":"0.0.0.0:10020"}, ' \
                            '{"key":"mapreduce.jobhistory.webapp.address", "value":"cdh5hakerberosnn.alpinenow.local:19888"}, ' \
                            '{"key":"yarn.app.mapreduce.am.staging-dir", "value":"/tmp/hadoop-yarn/staging"}, ' \
                            '{"key":"yarn.resourcemanager.admin.address", "value":"cdh5hakerberosnn.alpinenow.local:8033"}, ' \
                            '{"key":"yarn.resourcemanager.resource-tracker.address", "value":"cdh5hakerberosnn.alpinenow.local:8031"}, ' \
                            '{"key":"yarn.resourcemanager.scheduler.address", "value":"cdh5hakerberosnn.alpinenow.local:8030"}]'

    """
    conn = EmrConnection(
        cr.get_config("aws_access_key"),
        cr.get_config("aws_secret_key"),
        region = RegionInfo(name = cr.get_config("aws_region"),
            endpoint = cr.get_config("aws_region") + ".elasticmapreduce.amazonaws.com" ))

    emr_cluster = conn.describe_cluster(cluster_id)
    master_dns_hostname = emr_cluster.masterpublicdnsname

    # Build up connection parameters
    conn_params = []
    conn_params.append({"key": "mapreduce.jobhistory.address", "value": "{0}:10020".format(master_dns_hostname)})
    conn_params.append({"key": "mapreduce.jobhistory.webapp.address", "value": "{0}:19888".format(master_dns_hostname)})
    conn_params.append({"key": "yarn.app.mapreduce.am.staging-dir", "value": "/user"})
    conn_params.append({"key": "yarn.resourcemanager.admin.address", "value": "{0}:8033".format(master_dns_hostname)})
    conn_params.append({"key": "yarn.resourcemanager.scheduler.address", "value": "{0}:8030".format(master_dns_hostname)})
    # Serialize to JSON so the value matches the CONNECTION_PARAMETERS format
    # documented above (assumes `json` is imported at module level).
    conn_params_str = "CONNECTION_PARAMETERS='{0}'".format(json.dumps(conn_params))
    email_str = "EMAIL=\"avalanche_{0}.alpinenow.com\"".format(random.randint(1,99999))

    with open("emr_default.conf", "w") as f:
        f.write("HADOOP_DATA_SOURCE_NAME=\"{0}\"\n".format(cr.get_config("emr_cluster_name")))
        f.write("HADOOP_DATA_SOURCE_DISTRO=\"{0}\"\n".format("Amazon EMR5"))
        f.write("HADOOP_DATA_SOURCE_HOST=\"{0}\"\n".format(master_dns_hostname))
        f.write("HADOOP_DATA_SOURCE_PORT=\"8020\"\n")
        f.write("HADOOP_DATA_SOURCE_USER=\"hdfs\"\n")
        f.write("HADOOP_DATA_SOURCE_GROUP=\"hadoop\"\n")
        f.write("HADOOP_DATA_SOURCE_JT_HOST=\"{0}\"\n".format(master_dns_hostname))
        f.write("HADOOP_DATA_SOURCE_JT_PORT=\"8032\"\n")
        f.write(email_str + "\n")
        f.write(conn_params_str + "\n")

    return True
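
A minimal usage sketch for the function above, under stated assumptions: it relies on boto 2.x (EmrConnection, RegionInfo), on a configuration-reader object `cr` exposing `get_config()` as in the other examples here, and on `create_emr_cluster()` from a later example; the driver function name is illustrative, not part of the original code.

# Imports assumed by the snippet above (boto 2.x).
from boto.emr.connection import EmrConnection
from boto.regioninfo import RegionInfo
import json
import random

def write_emr_data_source(cr):
    """Hypothetical driver: create a cluster, then write its data source file."""
    cluster_id = create_emr_cluster(cr)      # defined in a later example
    if cluster_id == "ERROR":
        return False
    return create_data_source_variable(cluster_id, cr)   # writes emr_default.conf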
Example #2
class Rankmaniac:
    """
    (wrapper class)

    This class presents a simple wrapper around the AWS SDK. It strives
    to provide all the functionality required to run map-reduce
    (Hadoop) on Amazon. This way the students do not need to worry about
    learning the API for Amazon S3 and EMR, and instead can focus on
    computing pagerank quickly!
    """

    DefaultRegionName = 'us-west-2'
    DefaultRegionEndpoint = 'elasticmapreduce.us-west-2.amazonaws.com'

    def __init__(self,
                 team_id,
                 access_key,
                 secret_key,
                 bucket='cs144students'):
        """
        (constructor)

        Creates a new instance of the Rankmaniac class for a specific
        team using the provided credentials.

        Arguments:
            team_id       <str>     the team identifier, which may
                                    differ slightly from the actual team
                                    name.

            access_key    <str>     the AWS access key identifier.
            secret_key    <str>     the AWS secret access key.

        Keyword arguments:
            bucket        <str>     the S3 bucket name.
        """

        region = RegionInfo(None, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)

        self._s3_bucket = bucket
        self._s3_conn = S3Connection(access_key, secret_key)
        self._emr_conn = EmrConnection(access_key, secret_key, region=region)

        self.team_id = team_id
        self.job_id = None

        self._reset()
        self._num_instances = 1

    def _reset(self):
        """
        Resets the internal state of the job and submission.
        """

        self._iter_no = 0
        self._infile = None
        self._last_outdir = None

        self._last_process_step_iter_no = -1
        self._is_done = False

    def __del__(self):
        """
        (destructor)

        Terminates the map-reduce job if any, and closes the connections
        to Amazon S3 and EMR.
        """

        if self.job_id is not None:
            self.terminate()

        self._s3_conn.close()
        self._emr_conn.close()

    def __enter__(self):
        """
        Used for `with` syntax. Simply returns this instance since the
        set-up has all been done in the constructor.
        """

        return self

    def __exit__(self, type, value, traceback):
        """
        Refer to __del__().
        """

        self.__del__()
        return False  # do not swallow any exceptions

    def upload(self, indir='data'):
        """
        Uploads the local data to Amazon S3 under the configured bucket
        and key prefix (the team identifier). This way the code can be
        accessed by Amazon EMR to compute pagerank.

        Keyword arguments:
            indir       <str>       the base directory from which to
                                    upload contents.

        Special notes:
            This method only uploads **files** in the specified
            directory. It does not scan through subdirectories.

            WARNING! This method removes all previous (or ongoing)
            submission results, so it is unsafe to call while a job is
            already running (and possibly started elsewhere).
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        bucket = self._s3_conn.get_bucket(self._s3_bucket)

        # Clear out current bucket contents for team
        keys = bucket.list(prefix=self._get_keyname())
        bucket.delete_keys(keys)

        for filename in os.listdir(indir):
            relpath = os.path.join(indir, filename)
            if os.path.isfile(relpath):
                keyname = self._get_keyname(filename)
                key = bucket.new_key(keyname)
                key.set_contents_from_filename(relpath)

    def set_infile(self, filename):
        """
        Sets the data file to use for the first iteration of the
        pagerank step in the map-reduce job.
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        self._infile = filename

    def do_iter(self,
                pagerank_mapper,
                pagerank_reducer,
                process_mapper,
                process_reducer,
                num_pagerank_mappers=1,
                num_pagerank_reducers=1):
        """
        Adds a pagerank step and a process step to the current job.
        """

        self.do_niter(1,
                      pagerank_mapper,
                      pagerank_reducer,
                      process_mapper,
                      process_reducer,
                      num_pagerank_mappers=num_pagerank_mappers,
                      num_pagerank_reducers=num_pagerank_reducers)

    def do_niter(self,
                 n,
                 pagerank_mapper,
                 pagerank_reducer,
                 process_mapper,
                 process_reducer,
                 num_pagerank_mappers=1,
                 num_pagerank_reducers=1):
        """
        Adds N pagerank steps and N process steps to the current job.
        """

        num_process_mappers = 1
        num_process_reducers = 1

        iter_no = self._iter_no
        last_outdir = self._last_outdir
        steps = []
        for _ in range(n):
            if iter_no == 0:
                pagerank_input = self._infile
            elif iter_no > 0:
                pagerank_input = last_outdir

            pagerank_output = self._get_default_outdir('pagerank', iter_no)

            # Output from the pagerank step becomes input to process step
            process_input = pagerank_output

            process_output = self._get_default_outdir('process', iter_no)

            pagerank_step = self._make_step(pagerank_mapper, pagerank_reducer,
                                            pagerank_input, pagerank_output,
                                            num_pagerank_mappers,
                                            num_pagerank_reducers)

            process_step = self._make_step(process_mapper, process_reducer,
                                           process_input, process_output,
                                           num_process_mappers,
                                           num_process_reducers)

            steps.extend([pagerank_step, process_step])

            # Store `process_output` directory so it can be used in
            # subsequent iteration
            last_outdir = process_output
            iter_no += 1

        if self.job_id is None:
            self._submit_new_job(steps)
        else:
            self._emr_conn.add_jobflow_steps(self.job_id, steps)

        # Store directory and so it can be used in subsequent iteration;
        # however, only do so after the job was submitted or the steps
        # were added in case an exception occurs
        self._last_outdir = last_outdir
        self._iter_no = iter_no

    def is_done(self, jobdesc=None):
        """
        Returns `True` if the map-reduce job is done, and `False`
        otherwise.

        For all process-step output files that have not been fetched,
        gets the first part of the output file, and checks whether its
        contents begins with the string 'FinalRank'.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use

        Special notes:
            WARNING! The usage of this method in your code requires
            that you used the default output directories in all calls
            to do_iter().
        """

        # Cache the result so we can return immediately without hitting
        # any of the Amazon APIs
        if self._is_done:
            return True
        iter_no = self._get_last_process_step_iter_no(jobdesc=jobdesc)
        if iter_no < 0:
            return False
        i = self._last_process_step_iter_no

        while i < iter_no:
            i += 1
            outdir = self._get_default_outdir('process', iter_no=i)
            keyname = self._get_keyname(outdir, 'part-00000')

            bucket = self._s3_conn.get_bucket(self._s3_bucket)
            key = bucket.get_key(keyname)
            contents = ''

            if key is not None:
                contents = key.next()  # get first chunk of the output file
            if contents.startswith('FinalRank'):
                self._is_done = True  # cache result
                break

        self._last_process_step_iter_no = i

        return self._is_done

    def is_alive(self, jobdesc=None):
        """
        Returns `False` if the jobflow has been terminated (with or
        without errors), and `True` otherwise.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use

        Special notes:
            WARNING! This method should only be called **after**
            is_done() in order to be able to distinguish between the
            cases where the map-reduce job has outputted 'FinalRank'
            on its final iteration and has a 'COMPLETED' state.
        """

        if jobdesc is None:
            jobdesc = self.describe()

        if jobdesc["cluster"].status.state in ('TERMINATED_WITH_ERRORS',
                                               'TERMINATED'):
            return False

        return True

    def terminate(self):
        """
        Terminates a running map-reduce job.
        """

        if not self.job_id:
            raise RankmaniacError('No job is running.')

        self._emr_conn.terminate_jobflow(self.job_id)
        self.job_id = None

        self._reset()

    def download(self, outdir='results'):
        """
        Downloads the results from Amazon S3 to the local directory.

        Keyword arguments:
            outdir      <str>       the base directory to which to
                                    download contents.

        Special notes:
            This method downloads all keys (files) from the configured
            bucket for this particular team. It creates subdirectories
            as needed.
        """

        bucket = self._s3_conn.get_bucket(self._s3_bucket)
        keys = bucket.list(prefix=self._get_keyname())
        for key in keys:
            keyname = key.name
            # Ignore folder keys
            if '$' not in keyname:
                suffix = keyname.split('/')[1:]  # removes team identifier
                filename = os.path.join(outdir, *suffix)
                dirname = os.path.dirname(filename)

                if not os.path.exists(dirname):
                    os.makedirs(dirname)

                key.get_contents_to_filename(filename)

    def describe(self):
        """
        Gets the current map-reduce job details.

        Returns a dict with the cluster description (under 'cluster')
        and the list of step details (under 'steps').

        Special notes:
            The cluster description and steps have the following
            relevant fields.
                state       <str>           the state of the job flow,
                                            either COMPLETED
                                                 | FAILED
                                                 | TERMINATED
                                                 | RUNNING
                                                 | SHUTTING_DOWN
                                                 | STARTING
                                                 | WAITING

                steps       <list(boto.emr.emrobject.Step)>
                            a list of the step details in the workflow.

            The Step object has the following relevant fields.
                state               <str>       the state of the step.

                startdatetime       <str>       the start time of the
                                                job.

                enddatetime         <str>       the end time of the job.

            WARNING! Amazon has an upper-limit on the frequency with
            which you can call this method; we have had success with
            calling it at most once every 10 seconds.
        """

        if not self.job_id:
            raise RankmaniacError('No job is running.')

        cinfo = self._emr_conn.describe_cluster(self.job_id)
        sinfo1 = self._emr_conn.list_steps(self.job_id)
        steps = sinfo1.steps

        if "marker" in dir(sinfo1):
            sinfo2 = self._emr_conn.list_steps(self.job_id,
                                               marker=sinfo1.marker)
            steps += sinfo2.steps

        return {"cluster": cinfo, "steps": steps}

    def _get_last_process_step_iter_no(self, jobdesc=None):
        """
        Returns the most recently process-step of the job flow that has
        been completed.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use
        """

        if jobdesc is None:
            jobdesc = self.describe()
        steps = jobdesc["steps"]

        cnt = 0
        for i in range(len(steps)):
            step = steps[i]
            if step.status.state != 'COMPLETED':
                continue

            cnt += 1

        return cnt / 2 - 1

    def _get_default_outdir(self, name, iter_no=None):
        """
        Returns the default output directory, which is 'iter_no/name/'.
        """

        if iter_no is None:
            iter_no = self._iter_no

        # Return iter_no/name/ **with** the trailing slash
        return '%s/%s/' % (iter_no, name)

    def _submit_new_job(self, steps):
        """
        Submits a new job to run on Amazon EMR.
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        job_name = self._make_name()
        num_instances = self._num_instances
        log_uri = self._get_s3_team_uri('job_logs')
        self.job_id = self._emr_conn.run_jobflow(
            name=job_name,
            steps=steps,
            num_instances=num_instances,
            log_uri=log_uri,
            master_instance_type='m1.medium',
            slave_instance_type='m1.medium',
            ami_version='3.11.0',
            job_flow_role='EMR_EC2_DefaultRole',
            service_role='EMR_DefaultRole')

    def _make_step(self,
                   mapper,
                   reducer,
                   input,
                   output,
                   num_mappers=1,
                   num_reducers=1):
        """
        Returns a new step that runs the specified mapper and reducer,
        reading from the specified input and writing to the specified
        output.
        """

        bucket = self._s3_conn.get_bucket(self._s3_bucket)

        # Clear out current bucket/output contents for team
        keys = bucket.list(prefix=self._get_keyname(output))
        bucket.delete_keys(keys)

        mapper_uri = self._get_s3_team_uri(mapper)
        reducer_uri = self._get_s3_team_uri(reducer)
        step_name = self._make_name()
        step_args = [
            '-files',
            '%s,%s' % (mapper_uri, reducer_uri), '-jobconf',
            'mapred.map.tasks=%d' % (num_mappers), '-jobconf',
            'mapred.reduce.tasks=%d' % (num_reducers)
        ]

        return StreamingStep(name=step_name,
                             step_args=step_args,
                             mapper=mapper,
                             reducer=reducer,
                             input=self._get_s3_team_uri(input),
                             output=self._get_s3_team_uri(output))

    def _make_name(self):
        return strftime('%%s %m-%d-%Y %H:%M:%S', localtime()) % (self.team_id)

    def _get_keyname(self, *args):
        """
        Returns the key name to use in the grading bucket (for the
        particular team).

            'team_id/...'
        """

        return '%s/%s' % (self.team_id, '/'.join(args))

    def _get_s3_team_uri(self, *args):
        """
        Returns the Amazon S3 URI for the team submissions.
        """

        return 's3n://%s/%s' % (self._s3_bucket, self._get_keyname(*args))
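
A hedged usage sketch for the wrapper above. The team identifier, credentials, bucket contents and script names are placeholders, and RankmaniacError is assumed to be defined alongside the class; only methods defined on the class are used.

import time

def run_pagerank(team_id, access_key, secret_key):
    """Illustrative driver: upload, iterate, poll, download."""
    with Rankmaniac(team_id, access_key, secret_key) as r:
        r.upload('data')                          # pushes mappers/reducers/input to S3
        r.set_infile('input.txt')                 # placeholder input key
        r.do_niter(5,
                   'pagerank_map.py', 'pagerank_reduce.py',
                   'process_map.py', 'process_reduce.py')

        while True:
            jobdesc = r.describe()                # rate-limited; reuse it below
            if r.is_done(jobdesc=jobdesc):
                break
            if not r.is_alive(jobdesc=jobdesc):   # only meaningful after is_done()
                raise RankmaniacError('Job flow terminated early.')
            time.sleep(20)

        r.download('results')
    # leaving the `with` block terminates the job flow via __del__()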
Example #3
def create_emr_cluster(cr):
    """
    @PARAM:  Cluster configuration reader object
    Creates an EMR cluster given a set of configuration parameters
    Return:  EMR Cluster ID
    """

    #region = cr.get_config("aws_region")
    #conn = boto.emr.connect_to_region(region)
    conn = EmrConnection(
        cr.get_config("aws_access_key"),
        cr.get_config("aws_secret_key"),
        region=RegionInfo(name=cr.get_config("aws_region"),
                          endpoint=cr.get_config("aws_region") +
                          ".elasticmapreduce.amazonaws.com"))

    #  Create list of instance groups:  master, core, and task
    instance_groups = []
    instance_groups.append(
        InstanceGroup(num_instances=cr.get_config("emr_master_node_count"),
                      role="MASTER",
                      type=cr.get_config("emr_master_node_type"),
                      market=cr.get_config("emr_market_type"),
                      name="Master Node"))

    instance_groups.append(
        InstanceGroup(num_instances=cr.get_config("emr_core_node_count"),
                      role="CORE",
                      type=cr.get_config("emr_core_node_type"),
                      market=cr.get_config("emr_market_type"),
                      name="Core Node"))

    #  Only create task nodes if specifically asked for
    if cr.get_config("emr_task_node_count") > 0:
        instance_groups.append(
            InstanceGroup(num_instances=cr.get_config("emr_task_node_count"),
                          role="TASK",
                          type=cr.get_config("emr_task_node_type"),
                          market=cr.get_config("emr_market_type"),
                          name="Task Node"))

    print "Creating EMR Cluster with instance groups: {0}".format(
        instance_groups)

    #  Use these params to add overrides; they go away in Boto3
    api_params = {
        "Instances.Ec2SubnetId": cr.get_config("aws_subnet_id"),
        "ReleaseLabel": cr.get_config("emr_version")
    }

    #  Add step to load data
    step_args = [
        "s3-dist-cp", "--s3Endpoint=s3-us-west-1.amazonaws.com",
        "--src=s3://alpine-qa/automation/automation_test_data/",
        "--dest=hdfs:///automation_test_data", "--srcPattern=.*[a-zA-Z,]+"
    ]
    step = JarStep(name="s3distcp for data loading",
                   jar="command-runner.jar",
                   step_args=step_args,
                   action_on_failure="CONTINUE")

    cluster_id = conn.run_jobflow(
        cr.get_config("emr_cluster_name"),
        instance_groups=instance_groups,
        action_on_failure="TERMINATE_JOB_FLOW",
        keep_alive=True,
        enable_debugging=True,
        log_uri=cr.get_config("emr_log_uri"),
        #hadoop_version = "Amazon 2.7.2",
        #ReleaseLabel = "emr-5.0.0",
        #ami_version = "5.0.0",
        steps=[step],
        bootstrap_actions=[],
        ec2_keyname=cr.get_config("ec2_keyname"),
        visible_to_all_users=True,
        job_flow_role="EMR_EC2_DefaultRole",
        service_role="EMR_DefaultRole",
        api_params=api_params)

    print "EMR Cluster created, cluster id: {0}".format(cluster_id)
    state = conn.describe_cluster(cluster_id).status.state
    while state not in (u'COMPLETED', u'SHUTTING_DOWN', u'FAILED', u'WAITING'):
        # Sleep, then recheck the status.
        time.sleep(5)
        state = conn.describe_cluster(cluster_id).status.state
        print "State is: {0}, sleeping 5s...".format(state)

    if state == u'SHUTTING_DOWN' or state == u'FAILED':
        return "ERROR"

    #Check if the state is WAITING. Then launch the next steps
    if state == u'WAITING':
        #Finding the master node dns of EMR cluster
        master_dns = conn.describe_cluster(cluster_id).masterpublicdnsname
        print "DNS Name: {0}".format(master_dns)
        return cluster_id
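
The `cr` argument is only assumed to expose `get_config(key)`. A minimal stand-in for local experimentation might look like the sketch below; the keys are exactly the ones read by `create_emr_cluster()` above, and every value is a placeholder, not a recommendation.

class FakeConfigReader(object):
    """Illustrative stand-in for the configuration reader used above."""

    _values = {
        "aws_access_key": "AKIA................",   # placeholder
        "aws_secret_key": "....................",   # placeholder
        "aws_region": "us-west-1",
        "aws_subnet_id": "subnet-00000000",
        "ec2_keyname": "my-keypair",
        "emr_cluster_name": "automation-test",
        "emr_version": "emr-5.0.0",
        "emr_log_uri": "s3://my-bucket/emr-logs/",
        "emr_market_type": "ON_DEMAND",
        "emr_master_node_count": 1,
        "emr_master_node_type": "m3.xlarge",
        "emr_core_node_count": 2,
        "emr_core_node_type": "m3.xlarge",
        "emr_task_node_count": 0,
        "emr_task_node_type": "m3.xlarge",
    }

    def get_config(self, key):
        return self._values[key]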
Example #4
def create_data_source_variable(cluster_id, cr):
    """
    Creates a data source variable file using the cluster_id of an EMR cluster
    @PARAM:  cluster_id:  ID of an EMR cluster
    return:  True if successful; creates the file 'emr_default.conf' in the pwd

    Object created should look like:

    HADOOP_DATA_SOURCE_NAME="emr_data_source"
    HADOOP_DATA_SOURCE_DISTRO="Cloudera CDH5.4-5.7"
    HADOOP_DATA_SOURCE_HOST="emr_master_dns_hostname"
    HADOOP_DATA_SOURCE_PORT=8020
    HADOOP_DATA_SOURCE_USER="******"
    HADOOP_DATA_SOURCE_GROUP="hadoop"
    HADOOP_DATA_SOURCE_JT_HOST="emr_master_dns_hostname"
    HADOOP_DATA_SOURCE_JT_PORT=8032
    CONNECTION_PARAMETERS='[{"key":"mapreduce.jobhistory.address", "value":"0.0.0.0:10020"}, ' \
                            '{"key":"mapreduce.jobhistory.webapp.address", "value":"cdh5hakerberosnn.alpinenow.local:19888"}, ' \
                            '{"key":"yarn.app.mapreduce.am.staging-dir", "value":"/tmp/hadoop-yarn/staging"}, ' \
                            '{"key":"yarn.resourcemanager.admin.address", "value":"cdh5hakerberosnn.alpinenow.local:8033"}, ' \
                            '{"key":"yarn.resourcemanager.resource-tracker.address", "value":"cdh5hakerberosnn.alpinenow.local:8031"}, ' \
                            '{"key":"yarn.resourcemanager.scheduler.address", "value":"cdh5hakerberosnn.alpinenow.local:8030"}]'

    """
    conn = EmrConnection(
        cr.get_config("aws_access_key"),
        cr.get_config("aws_secret_key"),
        region=RegionInfo(name=cr.get_config("aws_region"),
                          endpoint=cr.get_config("aws_region") +
                          ".elasticmapreduce.amazonaws.com"))

    emr_cluster = conn.describe_cluster(cluster_id)
    master_dns_hostname = emr_cluster.masterpublicdnsname

    # Build up connection parameters
    conn_params = []
    conn_params.append({
        "key": "mapreduce.jobhistory.address",
        "value": "{0}:10020".format(master_dns_hostname)
    })
    conn_params.append({
        "key": "mapreduce.jobhistory.webapp.address",
        "value": "{0}:19888".format(master_dns_hostname)
    })
    conn_params.append({
        "key": "yarn.app.mapreduce.am.staging-dir",
        "value": "/user"
    })
    conn_params.append({
        "key": "yarn.resourcemanager.admin.address",
        "value": "{0}:8033".format(master_dns_hostname)
    })
    conn_params.append({
        "key": "yarn.resourcemanager.scheduler.address",
        "value": "{0}:8030".format(master_dns_hostname)
    })
    # Serialize to JSON so the value matches the CONNECTION_PARAMETERS format
    # documented above (assumes `json` is imported at module level).
    conn_params_str = "CONNECTION_PARAMETERS='{0}'".format(json.dumps(conn_params))
    email_str = "EMAIL=\"avalanche_{0}.alpinenow.com\"".format(
        random.randint(1, 99999))

    with open("emr_default.conf", "w") as f:
        f.write("HADOOP_DATA_SOURCE_NAME=\"{0}\"\n".format(
            cr.get_config("emr_cluster_name")))
        f.write("HADOOP_DATA_SOURCE_DISTRO=\"{0}\"\n".format("Amazon EMR5"))
        f.write("HADOOP_DATA_SOURCE_HOST=\"{0}\"\n".format(master_dns_hostname))
        f.write("HADOOP_DATA_SOURCE_PORT=\"8020\"\n")
        f.write("HADOOP_DATA_SOURCE_USER=\"hdfs\"\n")
        f.write("HADOOP_DATA_SOURCE_GROUP=\"hadoop\"\n")
        f.write("HADOOP_DATA_SOURCE_JT_HOST=\"{0}\"\n".format(master_dns_hostname))
        f.write("HADOOP_DATA_SOURCE_JT_PORT=\"8032\"\n")
        f.write(email_str + "\n")
        f.write(conn_params_str + "\n")

    return True
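
For completeness, a small illustrative helper (not part of the original code) that parses the KEY="value" lines written above back into a dict; it assumes only the file layout produced by `create_data_source_variable()`.

def read_data_source_conf(path="emr_default.conf"):
    """Parse the KEY="value" lines written by create_data_source_variable()."""
    conf = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or "=" not in line:
                continue
            key, _, value = line.partition("=")
            conf[key] = value.strip('"').strip("'")
    return conf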
Example #5
def create_emr_cluster(cr):
    """
    @PARAM:  Cluster configuration reader object
    Creates an EMR cluster given a set of configuration parameters
    Return:  EMR Cluster ID
    """

    #region = cr.get_config("aws_region")
    #conn = boto.emr.connect_to_region(region)
    conn = EmrConnection(
        cr.get_config("aws_access_key"),
        cr.get_config("aws_secret_key"),
        region = RegionInfo(name = cr.get_config("aws_region"),
                            endpoint = cr.get_config("aws_region") + ".elasticmapreduce.amazonaws.com" ))


    #  Create list of instance groups:  master, core, and task
    instance_groups = []
    instance_groups.append(InstanceGroup(
        num_instances = cr.get_config("emr_master_node_count"),
        role = "MASTER",
        type = cr.get_config("emr_master_node_type"),
        market = cr.get_config("emr_market_type"),
        name = "Master Node" ))

    instance_groups.append(InstanceGroup(
        num_instances = cr.get_config("emr_core_node_count"),
        role = "CORE",
        type = cr.get_config("emr_core_node_type"),
        market = cr.get_config("emr_market_type"),
        name = "Core Node" ))

    #  Only create task nodes if specifically asked for
    if cr.get_config("emr_task_node_count") > 0:
        instance_groups.append(InstanceGroup(
            num_instances = cr.get_config("emr_task_node_count"),
            role = "TASK",
            type = cr.get_config("emr_task_node_type"),
            market = cr.get_config("emr_market_type"),
            name = "Task Node" ))

    print "Creating EMR Cluster with instance groups: {0}".format(instance_groups)

    #  Use these params to add overrides; they go away in Boto3
    api_params = {"Instances.Ec2SubnetId": cr.get_config("aws_subnet_id"), "ReleaseLabel": cr.get_config("emr_version")}

    #  Add step to load data
    step_args = ["s3-dist-cp","--s3Endpoint=s3-us-west-1.amazonaws.com","--src=s3://alpine-qa/automation/automation_test_data/","--dest=hdfs:///automation_test_data","--srcPattern=.*[a-zA-Z,]+"]
    step = JarStep(name = "s3distcp for data loading",
                jar = "command-runner.jar",
                step_args = step_args,
                action_on_failure = "CONTINUE"
                )

    cluster_id = conn.run_jobflow(
        cr.get_config("emr_cluster_name"),
        instance_groups = instance_groups,
        action_on_failure = "TERMINATE_JOB_FLOW",
        keep_alive = True,
        enable_debugging = True,
        log_uri = cr.get_config("emr_log_uri"),
        #hadoop_version = "Amazon 2.7.2",
        #ReleaseLabel = "emr-5.0.0",
        #ami_version = "5.0.0",
        steps = [step],
        bootstrap_actions = [],
        ec2_keyname = cr.get_config("ec2_keyname"),
        visible_to_all_users = True,
        job_flow_role = "EMR_EC2_DefaultRole",
        service_role = "EMR_DefaultRole",
        api_params = api_params )

    print "EMR Cluster created, cluster id: {0}".format(cluster_id)
    state = conn.describe_cluster(cluster_id).status.state
    while state not in (u'COMPLETED', u'SHUTTING_DOWN', u'FAILED', u'WAITING'):
        # Sleep, then recheck the status.
        time.sleep(5)
        state = conn.describe_cluster(cluster_id).status.state
        print "State is: {0}, sleeping 5s...".format(state)

    if state == u'SHUTTING_DOWN' or state == u'FAILED':
        return "ERROR"

    #Check if the state is WAITING. Then launch the next steps
    if state == u'WAITING':
        #Finding the master node dns of EMR cluster
        master_dns = conn.describe_cluster(cluster_id).masterpublicdnsname
        print "DNS Name: {0}".format(master_dns)
        return cluster_id
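
Because `run_jobflow()` above is called with `keep_alive=True`, the cluster stays in the WAITING state after the s3-dist-cp step. A hedged tear-down sketch in the same boto connection style (the helper name is illustrative; the configuration keys are the ones used above):

def terminate_emr_cluster(cluster_id, cr):
    """Illustrative clean-up: shut the cluster down once the run is finished."""
    conn = EmrConnection(
        cr.get_config("aws_access_key"),
        cr.get_config("aws_secret_key"),
        region=RegionInfo(name=cr.get_config("aws_region"),
                          endpoint=cr.get_config("aws_region") +
                          ".elasticmapreduce.amazonaws.com"))
    conn.terminate_jobflow(cluster_id)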
Example #6
class EMRCluster(object):
    '''Representation of an EMR cluster.
    TODO: add a bridge to the boto interface for unit tests.
    '''
    emr_status_delay = 10  # in sec
    emr_status_max_delay = 60  # in sec
    emr_status_max_error = 30  # number of errors
    emr_max_idle = 10 * 60  # 10 min (in sec)
    rate_limit_lock = RateLimitLock()

    def __init__(self, prop):
        '''Constructor, initialize EMR connection.'''
        self.prop = prop
        self.conn = EmrConnection(self.prop.ec2.key, self.prop.ec2.secret)
        self.jobid = None
        self.retry = 0
        self.level = 0
        self.last_update = -1

    @property
    def priority(self):
        '''The priority used in EMRManager.
        The lower the value, the higher the priority.
        '''
        with EMRCluster.rate_limit_lock:
            if self.jobid is None:
                return 1
            return 0

    def get_instance_groups(self):
        '''Get instance groups to start a cluster.
        The bid price is computed from self.level, which indicates how far
        the price has been upgraded from the original price.
        '''
        instance_groups = []
        for group in self.prop.emr.instance_groups:
            (num, group_name, instance_type) = group
            # Clamp so that 0 <= level < len(price_upgrade_rate)
            level = max(0, min(self.level,
                               len(self.prop.emr.price_upgrade_rate) - 1))
            bprice = (self.prop.emr.prices[instance_type] *
                      self.prop.emr.price_upgrade_rate[level])
            name = '%s-%s@%f' % (group_name, 'SPOT', bprice)

            # Use on-demand instance if prices are zero.
            if bprice > 0:
                ig = InstanceGroup(num, group_name, instance_type, 'SPOT',
                                   name, '%.3f' % bprice)
            else:
                ig = InstanceGroup(num, group_name, instance_type, 'ON_DEMAND',
                                   name)

            instance_groups.append(ig)

        return instance_groups

    def get_bootstrap_actions(self):
        '''Get list of bootstrap actions from property'''
        actions = []
        for bootstrap_action in self.prop.emr.bootstrap_actions:
            assert len(bootstrap_action) >= 2, \
                'Wrong bootstrap action definition: ' + str(bootstrap_action)
            actions.append(
                BootstrapAction(bootstrap_action[0], bootstrap_action[1],
                                bootstrap_action[2:]))
        return actions

    @synchronized
    def start(self):
        '''Start an EMR cluster.'''
        # emr.project_name is required
        if self.prop.emr.project_name is None:
            raise ValueError('emr.project_name is not set')

        self.last_update = time.time()
        with EMRCluster.rate_limit_lock:
            self.jobid = self.conn.run_jobflow(
                name=self.prop.emr.cluster_name,
                ec2_keyname=self.prop.emr.keyname,
                log_uri=self.prop.emr.log_uri,
                ami_version=self.prop.emr.ami_version,
                bootstrap_actions=self.get_bootstrap_actions(),
                keep_alive=True,
                action_on_failure='CONTINUE',
                api_params={'VisibleToAllUsers': 'true'},
                instance_groups=self.get_instance_groups())
        message('Job flow created: %s', self.jobid)

        # Tag EC2 instances to allow future analysis
        tags = {
            'FlowControl': 'Briefly',
            'Project': self.prop.emr.project_name
        }
        if self.prop.emr.tags is not None:
            assert isinstance(self.prop.emr.tags, dict)
            tags = dict(tags.items() + self.prop.emr.tags.items())
        self.conn.add_tags(self.jobid, tags)

    @synchronized
    def terminate(self, level_upgrade=0):
        '''Terminate this EMR cluster.'''
        if self.jobid is None:
            return

        self.level += level_upgrade  # upgrade to another price level

        message('Terminate jobflow: %s', self.jobid)
        for i in range(3):
            try:
                with EMRCluster.rate_limit_lock:
                    self.conn.terminate_jobflow(self.jobid)
                break
            except Exception as e:
                message('Unable to terminate job flow: %s', self.jobid)
                message(traceback.format_exc())
        # We have to set jobid as None to create new cluster;
        # otherwise, run_steps will keep launching jobs on the bad cluster.
        self.jobid = None

    def is_idle(self):
        '''Return True if this cluster has a job flow and has been idle
        longer than emr_max_idle.'''
        return (self.jobid is not None) and (
            (time.time() - self.last_update) > self.emr_max_idle)

    def get_steps(self, node):
        '''Get the jar step from the node.'''
        step = JarStep(name=node.config.sub(node.config.emr.step_name,
                                            node_hash=node.hash()),
                       main_class=node.config.main_class,
                       jar=node.config.hadoop.jar,
                       action_on_failure='CONTINUE',
                       step_args=node.process_args(*node.config.args))
        return [step]

    def get_step_index(self, step_id):
        '''Get the index of a step given step_id (1 based)'''
        steps = [
            step.id
            for step in reversed(self.conn.list_steps(self.jobid).steps)
            if step.status is not None
        ]

        # list_steps() returns the latest step first (reversed above); return a 1-based index
        return steps.index(step_id) + 1

    def run_steps(self, node, wait=True):
        '''Main loop to execute a node.
        It blocks until the step completes or fails, and raises an
        exception on failure so that the step can be retried.
        TODO: add timeouts for each step?
        TODO: dynamically increase cluster size?
        '''
        if not self.jobid:
            self.start()

        try:
            with EMRCluster.rate_limit_lock:
                # Add a single step and keep its step_id for the following status checks.
                step_id = self.conn.add_jobflow_steps(
                    self.jobid, self.get_steps(node)).stepids[0].value
                assert step_id is not None
        except Exception as e:
            node.log('Unable to add jobflow steps: %s', node.hash())
            node.log('%s', traceback.format_exc())
            raise HadoopFailure()

        status_error_counter = 0
        step_status = 'PENDING'
        step_index = None
        step_start = time.time()

        # notify the node with status.
        node.notify_status('Running on EMR: %s', self.jobid)

        while wait and step_status in ['PENDING', 'RUNNING']:
            try:
                # Wait for the step status to turn to 'RUNNING'; back off
                # exponentially after errors, capped at a predefined limit.
                delay = min(self.emr_status_delay * (2**status_error_counter),
                            self.emr_status_max_delay)
                time.sleep(delay)

                # Keep current cluster alive.
                self.last_update = time.time()

                # Get current cluster status. May raise exception due to EMR request throttle.
                cluster_state = self.conn.describe_cluster(
                    self.jobid).status.state

                if step_index is None:
                    step_index = self.get_step_index(step_id)
                    node.log('Step #: %d', step_index)
                    node.log('Log URI: %s/%s/steps/%d/',
                             node.config.emr.log_uri, self.jobid, step_index)

                step_status = self.conn.describe_step(self.jobid,
                                                      step_id).status.state
                status_error_counter = 0  # reset counter
                node.log("%s: %s %s", self.jobid, cluster_state, step_status)

                if cluster_state in [
                        'TERMINATING', 'TERMINATED', 'TERMINATED_WITH_ERRORS'
                ]:  # cluster kill (maybe due to spot price), upgrade.
                    self.terminate(1)
                    break

                # Step running too long? Treat the EMR cluster as idle/stuck and restart.
                if (time.time() - step_start) > node.config.emr.step_timeout:
                    node.log('Step running too long. Restart with new cluster')
                    self.terminate()
                    break

            except KeyboardInterrupt:
                raise
            except Exception as e:
                node.log('EMR loop exception: %d error(s)',
                         status_error_counter)
                status_error_counter += 1
                if status_error_counter > self.emr_status_max_error:
                    self.terminate()
                    node.log('Too many errors in EMR loop')
                    node.log('Exception: %s', traceback.format_exc())
                    raise

        if step_status != 'COMPLETED':
            raise HadoopFailure()
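
A rough usage sketch, with heavy assumptions: `prop` is the property tree the constructor reads (`prop.ec2.key`, `prop.emr.*`), and `node` is whatever job object provides `config`, `hash()`, `process_args()`, `log()` and `notify_status()` as used above; both come from the surrounding framework and are only named here, not defined.

def run_node_on_emr(prop, node):
    """Illustrative driver around EMRCluster.run_steps()."""
    cluster = EMRCluster(prop)
    try:
        cluster.run_steps(node)          # blocks until the step completes or fails
    except HadoopFailure:
        # run_steps() raises so the caller can retry, possibly after a price upgrade.
        cluster.terminate(level_upgrade=1)
        raise
    finally:
        if cluster.is_idle():            # idle longer than emr_max_idle
            cluster.terminate()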
Example #7
class Rankmaniac:
    """
    (wrapper class)

    This class presents a simple wrapper around the AWS SDK. It strives
    to provide all the functionality required to run map-reduce
    (Hadoop) on Amazon. This way the students do not need to worry about
    learning the API for Amazon S3 and EMR, and instead can focus on
    computing pagerank quickly!
    """

    DefaultRegionName = 'us-west-2'
    DefaultRegionEndpoint = 'elasticmapreduce.us-west-2.amazonaws.com'

    def __init__(self, team_id, access_key, secret_key,
                 bucket='cs144students'):
        """
        (constructor)

        Creates a new instance of the Rankmaniac class for a specific
        team using the provided credentials.

        Arguments:
            team_id       <str>     the team identifier, which may
                                    differ slightly from the actual team
                                    name.

            access_key    <str>     the AWS access key identifier.
            secret_key    <str>     the AWS secret access key.

        Keyword arguments:
            bucket        <str>     the S3 bucket name.
        """

        region = RegionInfo(None, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)

        self._s3_bucket = bucket
        self._s3_conn = S3Connection(access_key, secret_key)
        self._emr_conn = EmrConnection(access_key, secret_key, region=region)

        self.team_id = team_id
        self.job_id = None

        self._reset()
        self._num_instances = 1

    def _reset(self):
        """
        Resets the internal state of the job and submission.
        """

        self._iter_no = 0
        self._infile = None
        self._last_outdir = None

        self._last_process_step_iter_no = -1
        self._is_done = False

    def __del__(self):
        """
        (destructor)

        Terminates the map-reduce job if any, and closes the connections
        to Amazon S3 and EMR.
        """

        if self.job_id is not None:
            self.terminate()

        self._s3_conn.close()
        self._emr_conn.close()

    def __enter__(self):
        """
        Used for `with` syntax. Simply returns this instance since the
        set-up has all been done in the constructor.
        """

        return self

    def __exit__(self, type, value, traceback):
        """
        Refer to __del__().
        """

        self.__del__()
        return False # do not swallow any exceptions

    def upload(self, indir='data'):
        """
        Uploads the local data to Amazon S3 under the configured bucket
        and key prefix (the team identifier). This way the code can be
        accessed by Amazon EMR to compute pagerank.

        Keyword arguments:
            indir       <str>       the base directory from which to
                                    upload contents.

        Special notes:
            This method only uploads **files** in the specified
            directory. It does not scan through subdirectories.

            WARNING! This method removes all previous (or ongoing)
            submission results, so it is unsafe to call while a job is
            already running (and possibly started elsewhere).
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        bucket = self._s3_conn.get_bucket(self._s3_bucket)

        # Clear out current bucket contents for team
        keys = bucket.list(prefix=self._get_keyname())
        bucket.delete_keys(keys)

        for filename in os.listdir(indir):
            relpath = os.path.join(indir, filename)
            if os.path.isfile(relpath):
                keyname = self._get_keyname(filename)
                key = bucket.new_key(keyname)
                key.set_contents_from_filename(relpath)

    def set_infile(self, filename):
        """
        Sets the data file to use for the first iteration of the
        pagerank step in the map-reduce job.
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        self._infile = filename

    def do_iter(self, pagerank_mapper, pagerank_reducer,
                process_mapper, process_reducer,
                num_pagerank_mappers=1, num_pagerank_reducers=1):
        """
        Adds a pagerank step and a process step to the current job.
        """

        self.do_niter(1, pagerank_mapper, pagerank_reducer,
                      process_mapper, process_reducer,
                      num_pagerank_mappers=num_pagerank_mappers,
                      num_pagerank_reducers=num_pagerank_reducers)

    def do_niter(self, n, pagerank_mapper, pagerank_reducer,
                 process_mapper, process_reducer,
                 num_pagerank_mappers=1, num_pagerank_reducers=1):
        """
        Adds N pagerank steps and N process steps to the current job.
        """

        num_process_mappers = 1
        num_process_reducers = 1

        iter_no = self._iter_no
        last_outdir = self._last_outdir
        steps = []
        for _ in range(n):
            if iter_no == 0:
                pagerank_input = self._infile
            elif iter_no > 0:
                pagerank_input = last_outdir

            pagerank_output = self._get_default_outdir('pagerank', iter_no)

            # Output from the pagerank step becomes input to process step
            process_input = pagerank_output

            process_output = self._get_default_outdir('process', iter_no)

            pagerank_step = self._make_step(pagerank_mapper, pagerank_reducer,
                                            pagerank_input, pagerank_output,
                                            num_pagerank_mappers,
                                            num_pagerank_reducers)

            process_step = self._make_step(process_mapper, process_reducer,
                                           process_input, process_output,
                                           num_process_mappers,
                                           num_process_reducers)

            steps.extend([pagerank_step, process_step])

            # Store `process_output` directory so it can be used in
            # subsequent iteration
            last_outdir = process_output
            iter_no += 1

        if self.job_id is None:
            self._submit_new_job(steps)
        else:
            self._emr_conn.add_jobflow_steps(self.job_id, steps)

        # Store directory and so it can be used in subsequent iteration;
        # however, only do so after the job was submitted or the steps
        # were added in case an exception occurs
        self._last_outdir = last_outdir
        self._iter_no = iter_no

    def is_done(self, jobdesc=None):
        """
        Returns `True` if the map-reduce job is done, and `False`
        otherwise.

        For all process-step output files that have not been fetched,
        gets the first part of the output file, and checks whether its
        contents begins with the string 'FinalRank'.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use

        Special notes:
            WARNING! The usage of this method in your code requires
            that you used the default output directories in all calls
            to do_iter().
        """

        # Cache the result so we can return immediately without hitting
        # any of the Amazon APIs
        if self._is_done:
            return True
        iter_no = self._get_last_process_step_iter_no(jobdesc=jobdesc)
        if iter_no < 0:
            return False
        i = self._last_process_step_iter_no

        while i < iter_no:
            i += 1
            outdir = self._get_default_outdir('process', iter_no=i)
            keyname = self._get_keyname(outdir, 'part-00000')

            bucket = self._s3_conn.get_bucket(self._s3_bucket)
            key = bucket.get_key(keyname)
            contents = ''

            if key is not None:
                contents = key.next() # get first chunk of the output file
            if contents.startswith('FinalRank'):
                self._is_done = True # cache result
                break

        self._last_process_step_iter_no = i

        return self._is_done

    def is_alive(self, jobdesc=None):
        """
        Returns `False` if the jobflow has been terminated (with or
        without errors), and `True` otherwise.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use

        Special notes:
            WARNING! This method should only be called **after**
            is_done() in order to be able to distinguish between the
            cases where the map-reduce job has outputted 'FinalRank'
            on its final iteration and has a 'COMPLETED' state.
        """

        if jobdesc is None:
            jobdesc = self.describe()

        if jobdesc["cluster"].status.state in ('TERMINATED_WITH_ERRORS', 'TERMINATED'):
            return False

        return True

    def terminate(self):
        """
        Terminates a running map-reduce job.
        """

        if not self.job_id:
            raise RankmaniacError('No job is running.')

        self._emr_conn.terminate_jobflow(self.job_id)
        self.job_id = None

        self._reset()

    def download(self, outdir='results'):
        """
        Downloads the results from Amazon S3 to the local directory.

        Keyword arguments:
            outdir      <str>       the base directory to which to
                                    download contents.

        Special notes:
            This method downloads all keys (files) from the configured
            bucket for this particular team. It creates subdirectories
            as needed.
        """

        bucket = self._s3_conn.get_bucket(self._s3_bucket)
        keys = bucket.list(prefix=self._get_keyname())
        for key in keys:
            keyname = key.name
            # Ignore folder keys
            if '$' not in keyname:
                suffix = keyname.split('/')[1:] # removes team identifier
                filename = os.path.join(outdir, *suffix)
                dirname = os.path.dirname(filename)

                if not os.path.exists(dirname):
                    os.makedirs(dirname)

                key.get_contents_to_filename(filename)

    def describe(self):
        """
        Gets the current map-reduce job details.

        Returns a dict with the cluster description (under 'cluster')
        and the list of step details (under 'steps').

        Special notes:
            The cluster description and steps have the following
            relevant fields.
                state       <str>           the state of the job flow,
                                            either COMPLETED
                                                 | FAILED
                                                 | TERMINATED
                                                 | RUNNING
                                                 | SHUTTING_DOWN
                                                 | STARTING
                                                 | WAITING

                steps       <list(boto.emr.emrobject.Step)>
                            a list of the step details in the workflow.

            The Step object has the following relevant fields.
                state               <str>       the state of the step.

                startdatetime       <str>       the start time of the
                                                job.

                enddatetime         <str>       the end time of the job.

            WARNING! Amazon has an upper-limit on the frequency with
            which you can call this method; we have had success with
            calling it at most once every 10 seconds.
        """

        if not self.job_id:
            raise RankmaniacError('No job is running.')
            
        cinfo = self._emr_conn.describe_cluster(self.job_id)
        sinfo1 = self._emr_conn.list_steps(self.job_id)
        steps = sinfo1.steps

        if "marker" in dir(sinfo1):
            sinfo2 = self._emr_conn.list_steps(self.job_id, marker=sinfo1.marker)
            steps += sinfo2.steps

        return {"cluster": cinfo, "steps": steps}

    def _get_last_process_step_iter_no(self, jobdesc=None):
        """
        Returns the iteration number of the most recent process step of
        the job flow that has completed.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use
        """

        if jobdesc is None:
            jobdesc = self.describe()
        steps = jobdesc["steps"]
    
        cnt = 0
        for i in range(len(steps)):
            step = steps[i]
            if step.status.state != 'COMPLETED':
                continue

            cnt += 1

        return cnt / 2 - 1

    def _get_default_outdir(self, name, iter_no=None):
        """
        Returns the default output directory, which is 'iter_no/name/'.
        """

        if iter_no is None:
            iter_no = self._iter_no

        # Return iter_no/name/ **with** the trailing slash
        return '%s/%s/' % (iter_no, name)

    def _submit_new_job(self, steps):
        """
        Submits a new job to run on Amazon EMR.
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        job_name = self._make_name()
        num_instances = self._num_instances
        log_uri = self._get_s3_team_uri('job_logs')
        self.job_id = self._emr_conn.run_jobflow(name=job_name,
                                                 steps=steps,
                                                 num_instances=num_instances,
                                                 log_uri=log_uri,
                                                 master_instance_type='m1.medium',
                                                 slave_instance_type='m1.medium',
                                                 ami_version='3.11.0',
                                                 job_flow_role='EMR_EC2_DefaultRole',
                                                 service_role='EMR_DefaultRole')

    def _make_step(self, mapper, reducer, input, output,
                   num_mappers=1, num_reducers=1):
        """
        Returns a new step that runs the specified mapper and reducer,
        reading from the specified input and writing to the specified
        output.
        """

        bucket = self._s3_conn.get_bucket(self._s3_bucket)

        # Clear out current bucket/output contents for team
        keys = bucket.list(prefix=self._get_keyname(output))
        bucket.delete_keys(keys)

        mapper_uri = self._get_s3_team_uri(mapper)
        reducer_uri = self._get_s3_team_uri(reducer)
        step_name = self._make_name()
        step_args = ['-files', '%s,%s' % (mapper_uri, reducer_uri),
                     '-jobconf', 'mapred.map.tasks=%d' % (num_mappers),
                     '-jobconf', 'mapred.reduce.tasks=%d' % (num_reducers)]

        return StreamingStep(name=step_name,
                            step_args=step_args,
                            mapper=mapper,
                            reducer=reducer,
                            input=self._get_s3_team_uri(input),
                            output=self._get_s3_team_uri(output))

    def _make_name(self):
        return strftime('%%s %m-%d-%Y %H:%M:%S', localtime()) % (self.team_id)

    def _get_keyname(self, *args):
        """
        Returns the key name to use in the grading bucket (for the
        particular team).

            'team_id/...'
        """

        return '%s/%s' % (self.team_id, '/'.join(args))

    def _get_s3_team_uri(self, *args):
        """
        Returns the Amazon S3 URI for the team submissions.
        """

        return 's3n://%s/%s' % (self._s3_bucket, self._get_keyname(*args))

    def post(self):
        if not boto.config.has_section('Boto'):
            boto.config.add_section('Boto')
        boto.config.set('Boto', 'https_validate_certificates', 'False')
        note = ''
        data_para = [0, 0, 0, 0, 0]
        s3_connection = S3Connection(access_id, access_key)
        bucket = s3_connection.get_bucket('bucket774')
        k = Key(bucket)
        k.key = 'temp_para.json'
        temp_para = json.loads(k.get_contents_as_string())
        if (temp_para[6] == 1):
            k.key = 'cluster_id'
            cluster_id = k.get_contents_as_string()
            conn = EmrConnection(access_id, access_key)
            if (temp_para[7] == 0):
                status = conn.describe_cluster(cluster_id)
                if (status.status.state == 'WAITING'):
                    PYdata = get_output()
                    conn.terminate_jobflow(cluster_id)
                    data = in_circle_to_pi(PYdata, temp_para[0])
                    k.key = 'temp_para.json'
                    temp_para[6] = 0
                    k.set_contents_from_string(json.dumps(temp_para))
                    data_para[0:4] = temp_para[0:4]
                    data_para[4] = json.loads(data)[-1]
                    note = 'last emr job done, result has been updated'
                    save_result(data, json.dumps(data_para))

                else:
                    note = 'last emr calculation has not finished, please wait.'
                    k.key = 'record.json'
                    data = k.get_contents_as_string()
                    k.key = 'record_para.json'
                    data_para_json = k.get_contents_as_string()
                    data_para = json.loads(data_para_json)
            elif (temp_para[7] == 1):
                status = conn.describe_cluster(cluster_id)
                if (status.status.state == 'WAITING'):
                    k.key = 'temp_data.json'
                    PYdata = np.array(json.loads(k.get_contents_as_string()))
                    PYdata += get_output()
                    if (round(
                            np.sum(PYdata) / (temp_para[3] * temp_para[5]),
                            temp_para[4]) == round(math.pi, temp_para[4])):
                        for i in range(1, len(PYdata)):
                            PYdata[i] += PYdata[i - 1]
                            PYdata[i - 1] /= temp_para[0] * (i) * temp_para[5]
                        PYdata[len(PYdata) -
                               1] /= temp_para[0] * len(PYdata) * temp_para[5]
                        data = json.dumps(
                            PYdata.tolist())  # convert numpy array to list

                        k.key = 'temp_para.json'
                        temp_para[6] = 0
                        k.set_contents_from_string(json.dumps(temp_para))
                        data_para[0:4] = temp_para[0:4]
                        data_para[4] = json.loads(data)[-1]
                        conn.terminate_jobflow(cluster_id)
                        note = 'last emr job done, result has been updated'
                        save_result(data, json.dumps(data_para))
                    else:
                        note = str(np.sum(PYdata)) + ',' + str(
                            temp_para[3]) + ',' + str(temp_para[5])
                        add_step_emr(conn, cluster_id)
                        save_temp_result(PYdata)
                        for key in bucket.list(prefix='output/'):
                            key.delete()
                        temp_para[5] += 1
                        k.key = 'temp_para.json'
                        k.set_contents_from_string(json.dumps(temp_para))
                        #note='have not reached the given accuracy in last run, keep working'
                        k.key = 'record.json'
                        data = k.get_contents_as_string()
                        k.key = 'record_para.json'
                        data_para_json = k.get_contents_as_string()
                        data_para = json.loads(data_para_json)
                else:
                    note = 'last emr calculation has not finished, please wait.'
                    k.key = 'record.json'
                    data = k.get_contents_as_string()
                    k.key = 'record_para.json'
                    data_para_json = k.get_contents_as_string()
                    data_para = json.loads(data_para_json)
        else:
            k.key = 'record.json'
            data = k.get_contents_as_string()
            k.key = 'record_para.json'
            data_para_json = k.get_contents_as_string()
            data_para = json.loads(data_para_json)

        doRender(
            self, 'chart.htm', {
                'Data': data,
                'shots_each_threat': data_para[0],
                'R': data_para[1],
                'Q': data_para[2],
                'pi': math.pi,
                'shots': data_para[3],
                'result': data_para[4],
                'note': note
            })
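
`add_step_emr()`, `get_output()`, `save_result()` and the other helpers called in `post()` are defined elsewhere in that handler's module. Purely as an illustration of what `add_step_emr()` might do with the boto API used here (the step name, script names and S3 paths are invented placeholders), a sketch:

from boto.emr.step import StreamingStep

def add_step_emr(conn, cluster_id):
    """Illustrative: queue one more streaming step on the running cluster."""
    step = StreamingStep(name='monte-carlo pi iteration',
                         mapper='s3n://bucket774/mapper.py',     # placeholder script
                         reducer='s3n://bucket774/reducer.py',   # placeholder script
                         input='s3n://bucket774/input/',
                         output='s3n://bucket774/output/')
    conn.add_jobflow_steps(cluster_id, [step])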