Example #1
# Imports and exception type assumed by this snippet (boto 2.x).
import os
from time import localtime, strftime

from boto.emr.connection import EmrConnection
from boto.emr.step import StreamingStep
from boto.regioninfo import RegionInfo
from boto.s3.connection import S3Connection


class RankmaniacError(Exception):
    """General (catch-all) exception for errors in this module."""


class Rankmaniac:
    """
    (wrapper class)

    This class presents a simple wrapper around the AWS SDK. It strives
    to provide all the functionality required to run map-reduce
    (Hadoop) on Amazon. This way the students do not need to worry about
    learning the API for Amazon S3 and EMR, and instead can focus on
    computing pagerank quickly!
    """

    DefaultRegionName = 'us-west-2'
    DefaultRegionEndpoint = 'elasticmapreduce.us-west-2.amazonaws.com'

    def __init__(self,
                 team_id,
                 access_key,
                 secret_key,
                 bucket='cs144students'):
        """
        (constructor)

        Creates a new instance of the Rankmaniac class for a specific
        team using the provided credentials.

        Arguments:
            team_id       <str>     the team identifier, which may
                                    differ slightly from the actual
                                    team name.

            access_key    <str>     the AWS access key identifier.
            secret_key    <str>     the AWS secret access key.

        Keyword arguments:
            bucket        <str>     the S3 bucket name.
        """

        region = RegionInfo(None, self.DefaultRegionName,
                            self.DefaultRegionEndpoint)

        self._s3_bucket = bucket
        self._s3_conn = S3Connection(access_key, secret_key)
        self._emr_conn = EmrConnection(access_key, secret_key, region=region)

        self.team_id = team_id
        self.job_id = None

        self._reset()
        self._num_instances = 1

    def _reset(self):
        """
        Resets the internal state of the job and submission.
        """

        self._iter_no = 0
        self._infile = None
        self._last_outdir = None

        self._last_process_step_iter_no = -1
        self._is_done = False

    def __del__(self):
        """
        (destructor)

        Terminates the map-reduce job if any, and closes the connections
        to Amazon S3 and EMR.
        """

        if self.job_id is not None:
            self.terminate()

        self._s3_conn.close()
        self._emr_conn.close()

    def __enter__(self):
        """
        Used for `with` syntax. Simply returns this instance since the
        set-up has all been done in the constructor.
        """

        return self

    def __exit__(self, type, value, traceback):
        """
        Refer to __del__().
        """

        self.__del__()
        return False  # do not swallow any exceptions

    def upload(self, indir='data'):
        """
        Uploads the local data to Amazon S3 under the configured bucket
        and key prefix (the team identifier). This way the code can be
        accessed by Amazon EMR to compute pagerank.

        Keyword arguments:
            indir       <str>       the base directory from which to
                                    upload contents.

        Special notes:
            This method only uploads **files** in the specified
            directory. It does not scan through subdirectories.

            WARNING! This method removes all previous (or ongoing)
            submission results, so it is unsafe to call while a job is
            already running (and possibly started elsewhere).
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        bucket = self._s3_conn.get_bucket(self._s3_bucket)

        # Clear out current bucket contents for team
        keys = bucket.list(prefix=self._get_keyname())
        bucket.delete_keys(keys)

        for filename in os.listdir(indir):
            relpath = os.path.join(indir, filename)
            if os.path.isfile(relpath):
                keyname = self._get_keyname(filename)
                key = bucket.new_key(keyname)
                key.set_contents_from_filename(relpath)

    def set_infile(self, filename):
        """
        Sets the data file to use for the first iteration of the
        pagerank step in the map-reduce job.
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        self._infile = filename

    def do_iter(self,
                pagerank_mapper,
                pagerank_reducer,
                process_mapper,
                process_reducer,
                num_pagerank_mappers=1,
                num_pagerank_reducers=1):
        """
        Adds a pagerank step and a process step to the current job.
        """

        self.do_niter(1,
                      pagerank_mapper,
                      pagerank_reducer,
                      process_mapper,
                      process_reducer,
                      num_pagerank_mappers=num_pagerank_mappers,
                      num_pagerank_reducers=num_pagerank_reducers)

    def do_niter(self,
                 n,
                 pagerank_mapper,
                 pagerank_reducer,
                 process_mapper,
                 process_reducer,
                 num_pagerank_mappers=1,
                 num_pagerank_reducers=1):
        """
        Adds N pagerank steps and N process steps to the current job.
        """

        num_process_mappers = 1
        num_process_reducers = 1

        iter_no = self._iter_no
        last_outdir = self._last_outdir
        steps = []
        for _ in range(n):
            if iter_no == 0:
                pagerank_input = self._infile
            elif iter_no > 0:
                pagerank_input = last_outdir

            pagerank_output = self._get_default_outdir('pagerank', iter_no)

            # Output from the pagerank step becomes input to process step
            process_input = pagerank_output

            process_output = self._get_default_outdir('process', iter_no)

            pagerank_step = self._make_step(pagerank_mapper, pagerank_reducer,
                                            pagerank_input, pagerank_output,
                                            num_pagerank_mappers,
                                            num_pagerank_reducers)

            process_step = self._make_step(process_mapper, process_reducer,
                                           process_input, process_output,
                                           num_process_mappers,
                                           num_process_reducers)

            steps.extend([pagerank_step, process_step])

            # Store `process_output` directory so it can be used in
            # subsequent iteration
            last_outdir = process_output
            iter_no += 1

        if self.job_id is None:
            self._submit_new_job(steps)
        else:
            self._emr_conn.add_jobflow_steps(self.job_id, steps)

        # Store the output directory so it can be used in the subsequent
        # iteration; only do so after the job was submitted or the steps
        # were added, in case an exception occurs
        self._last_outdir = last_outdir
        self._iter_no = iter_no

    def is_done(self, jobdesc=None):
        """
        Returns `True` if the map-reduce job is done, and `False`
        otherwise.

        For each process-step output file that has not yet been fetched,
        gets the first part of the output file and checks whether its
        contents begin with the string 'FinalRank'.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use

        Special notes:
            WARNING! Using this method in your code requires that you
            used the default output directories in all calls to
            do_iter().
        """

        # Cache the result so we can return immediately without hitting
        # any of the Amazon APIs
        if self._is_done:
            return True
        iter_no = self._get_last_process_step_iter_no(jobdesc=jobdesc)
        if iter_no < 0:
            return False
        i = self._last_process_step_iter_no

        while i < iter_no:
            i += 1
            outdir = self._get_default_outdir('process', iter_no=i)
            keyname = self._get_keyname(outdir, 'part-00000')

            bucket = self._s3_conn.get_bucket(self._s3_bucket)
            key = bucket.get_key(keyname)
            contents = ''

            if key is not None:
                contents = key.next()  # get first chunk of the output file
            if contents.startswith('FinalRank'):
                self._is_done = True  # cache result
                break

        self._last_process_step_iter_no = i

        return self._is_done

    def is_alive(self, jobdesc=None):
        """
        Checks whether the jobflow has completed, failed, or been
        terminated.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use

        Special notes:
            WARNING! This method should only be called **after**
            is_done() in order to be able to distinguish between the
            cases where the map-reduce job has outputted 'FinalRank'
            on its final iteration and has a 'COMPLETED' state.
        """

        if jobdesc is None:
            jobdesc = self.describe()

        if jobdesc["cluster"].status.state in ('TERMINATED_WITH_ERRORS',
                                               'TERMINATED'):
            return False

        return True

    def terminate(self):
        """
        Terminates a running map-reduce job.
        """

        if not self.job_id:
            raise RankmaniacError('No job is running.')

        self._emr_conn.terminate_jobflow(self.job_id)
        self.job_id = None

        self._reset()

    def download(self, outdir='results'):
        """
        Downloads the results from Amazon S3 to the local directory.

        Keyword arguments:
            outdir      <str>       the base directory to which to
                                    download contents.

        Special notes:
            This method downloads all keys (files) from the configured
            bucket for this particular team. It creates subdirectories
            as needed.
        """

        bucket = self._s3_conn.get_bucket(self._s3_bucket)
        keys = bucket.list(prefix=self._get_keyname())
        for key in keys:
            keyname = key.name
            # Ignore folder keys
            if '$' not in keyname:
                suffix = keyname.split('/')[1:]  # removes team identifier
                filename = os.path.join(outdir, *suffix)
                dirname = os.path.dirname(filename)

                if not os.path.exists(dirname):
                    os.makedirs(dirname)

                key.get_contents_to_filename(filename)

    def describe(self):
        """
        Gets the current map-reduce job details.

        Returns a dict with two keys: 'cluster', the boto description
        of the cluster, and 'steps', a list of boto step summaries.

        Special notes:
            The cluster description has the following relevant field.
                status.state    <str>       the state of the cluster,
                                            either STARTING
                                                 | BOOTSTRAPPING
                                                 | RUNNING
                                                 | WAITING
                                                 | TERMINATING
                                                 | TERMINATED
                                                 | TERMINATED_WITH_ERRORS

            Each step summary has the following relevant fields.
                status.state    <str>       the state of the step.

                status.timeline <obj>       the step timeline, including
                                            its creation date and time.

            WARNING! Amazon has an upper-limit on the frequency with
            which you can call this method; we have had success with
            calling it at most once every 10 seconds.
        """

        if not self.job_id:
            raise RankmaniacError('No job is running.')

        cinfo = self._emr_conn.describe_cluster(self.job_id)
        sinfo1 = self._emr_conn.list_steps(self.job_id)
        steps = sinfo1.steps

        if "marker" in dir(sinfo1):
            sinfo2 = self._emr_conn.list_steps(self.job_id,
                                               marker=sinfo1.marker)
            steps += sinfo2.steps

        return {"cluster": cinfo, "steps": steps}

    def _get_last_process_step_iter_no(self, jobdesc=None):
        """
        Returns the iteration number of the most recent process step of
        the job flow that has completed.

        Keyword arguments:
            jobdesc     <boto.emr.JobFlow>      cached description of
                                                jobflow to use
        """

        if jobdesc is None:
            jobdesc = self.describe()
        steps = jobdesc["steps"]

        cnt = 0
        for step in steps:
            if step.status.state == 'COMPLETED':
                cnt += 1

        return cnt // 2 - 1

    def _get_default_outdir(self, name, iter_no=None):
        """
        Returns the default output directory, which is 'iter_no/name/'.
        """

        if iter_no is None:
            iter_no = self._iter_no

        # Return iter_no/name/ **with** the trailing slash
        return '%s/%s/' % (iter_no, name)

    def _submit_new_job(self, steps):
        """
        Submits a new job to run on Amazon EMR.
        """

        if self.job_id is not None:
            raise RankmaniacError('A job is already running.')

        job_name = self._make_name()
        num_instances = self._num_instances
        log_uri = self._get_s3_team_uri('job_logs')
        self.job_id = self._emr_conn.run_jobflow(
            name=job_name,
            steps=steps,
            num_instances=num_instances,
            log_uri=log_uri,
            master_instance_type='m1.medium',
            slave_instance_type='m1.medium',
            ami_version='3.11.0',
            job_flow_role='EMR_EC2_DefaultRole',
            service_role='EMR_DefaultRole')

    def _make_step(self,
                   mapper,
                   reducer,
                   input,
                   output,
                   num_mappers=1,
                   num_reducers=1):
        """
        Returns a new step that runs the specified mapper and reducer,
        reading from the specified input and writing to the specified
        output.
        """

        bucket = self._s3_conn.get_bucket(self._s3_bucket)

        # Clear out current bucket/output contents for team
        keys = bucket.list(prefix=self._get_keyname(output))
        bucket.delete_keys(keys)

        mapper_uri = self._get_s3_team_uri(mapper)
        reducer_uri = self._get_s3_team_uri(reducer)
        step_name = self._make_name()
        step_args = [
            '-files',
            '%s,%s' % (mapper_uri, reducer_uri), '-jobconf',
            'mapred.map.tasks=%d' % (num_mappers), '-jobconf',
            'mapred.reduce.tasks=%d' % (num_reducers)
        ]

        return StreamingStep(name=step_name,
                             step_args=step_args,
                             mapper=mapper,
                             reducer=reducer,
                             input=self._get_s3_team_uri(input),
                             output=self._get_s3_team_uri(output))

    def _make_name(self):
        return strftime('%%s %m-%d-%Y %H:%M:%S', localtime()) % (self.team_id)

    def _get_keyname(self, *args):
        """
        Returns the key name to use in the grading bucket (for the
        particular team).

            'team_id/...'
        """

        return '%s/%s' % (self.team_id, '/'.join(args))

    def _get_s3_team_uri(self, *args):
        """
        Returns the Amazon S3 URI for the team submissions.
        """

        return 's3n://%s/%s' % (self._s3_bucket, self._get_keyname(*args))
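
A minimal usage sketch for the wrapper above, assuming the class lives in a module named rankmaniac; the credentials, script names, and file names below are illustrative placeholders, not part of the original snippet:

from time import sleep

from rankmaniac import Rankmaniac   # hypothetical module name

with Rankmaniac('team-xyz', ACCESS_KEY, SECRET_KEY) as r:   # placeholder credentials
    r.upload(indir='data')        # push mappers, reducers and data to S3
    r.set_infile('input.txt')     # pagerank input for the first iteration
    r.do_niter(5,
               'pagerank_map.py', 'pagerank_reduce.py',
               'process_map.py', 'process_reduce.py')

    while True:
        desc = r.describe()               # rate-limited; call at most every ~10s
        if r.is_done(jobdesc=desc):       # looks for a 'FinalRank' line in S3
            break
        if not r.is_alive(jobdesc=desc):  # jobflow terminated without finishing
            break
        sleep(10)

    r.download(outdir='results')          # fetch all of the team's keys from S3
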
Example #2
    for index, cluster in enumerate(clusters.clusters):
        print "[%s] %s" % (index, cluster.id)

    # if there is a command line arg, use it for the cluster_id
    if len(sys.argv) > 1:
        cluster_id = sys.argv[1]
    else:
        if len(clusters.clusters) == 0:
            sys.exit("No EMR clusters running.")
        selected_cluster = input("Select a Cluster: ")
        cluster_id = clusters.clusters[int(selected_cluster)].id

    print cluster_id

    # List EMR Steps
    steps = emr_conn.list_steps(cluster_id)
    step_cnt = 0
    for index, step in enumerate(steps.steps):
        time = dateutil.parser.parse(
            step.status.timeline.creationdatetime).astimezone(tz.tzlocal())
        print "[%s] NAME: %s - STATE: %s - START TIME: %s" % (
            index, step.name, step.status.state,
            time.strftime("%Y-%m-%d %H:%M"))
        step_cnt += 1

    # if there are two command line args, use the second one as the selected step index
    if len(sys.argv) > 2:
        selected_step = sys.argv[2]
    else:
        selected_step = input("Select a Step: ")
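
The fragment above assumes that emr_conn and clusters already exist. A plausible setup with boto 2.x (the region name and credential variables below are assumptions, not part of the original) would be:

import sys

import boto.emr
import dateutil.parser
from dateutil import tz

# Connect to EMR and list the clusters that are still active.
emr_conn = boto.emr.connect_to_region(
    'us-west-2',
    aws_access_key_id=ACCESS_KEY,        # placeholder credentials
    aws_secret_access_key=SECRET_KEY)
clusters = emr_conn.list_clusters(
    cluster_states=['STARTING', 'RUNNING', 'WAITING'])
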
Example #3
    for index, cluster in enumerate(clusters.clusters):
        print "[%s] %s" % (index, cluster.id)

    # if there is a command line arg, use it for the cluster_id
    if len(sys.argv) > 1:
        cluster_id = sys.argv[1]
    else:
        if len(clusters.clusters) == 0:
            sys.exit("No EMR clusters running.")
        selected_cluster = input("Select a Cluster: ")
        cluster_id = clusters.clusters[int(selected_cluster)].id

    print cluster_id

    # List EMR Steps
    steps = emr_conn.list_steps(cluster_id)
    step_cnt = 0
    for index, step in enumerate(steps.steps):
        time = dateutil.parser.parse(step.status.timeline.creationdatetime).astimezone(tz.tzlocal())
        print "[%s] NAME: %s - STATE: %s - START TIME: %s" % (index, step.name, step.status.state,
                                                              time.strftime("%Y-%m-%d %H:%M"))
        step_cnt += 1

    # if there are two command line args, use the second one as the selected step index
    if len(sys.argv) > 2:
        selected_step = sys.argv[2]
    else:
        selected_step = input("Select a Step: ")

    step_id = steps.steps[int(selected_step)].id
    print step_id
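
Once step_id is known, the same connection can describe the selected step; a short continuation of the fragment (a sketch, not part of the original) might be:

    # Fetch and print the current state of the selected step.
    step = emr_conn.describe_step(cluster_id, step_id)
    print "STATE: %s" % step.status.state
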
Example #4
# Imports assumed by this snippet (boto 2.x, Python 2).
import logging
import sys
import time

from boto.emr.connection import EmrConnection
from boto.emr.step import JarStep, ScriptRunnerStep, StreamingStep
from boto.regioninfo import RegionInfo


class EmrManager(object):
 
    # Constructor. Reads the required settings from the parameters dict and exits if any are missing.
    def __init__(self, parameters):
        try: 
            self.region_name = parameters["region_name"]
            self.access_key = parameters["access_key"]
            self.secret_key = parameters["secret_key"]
            self.ec2_keypair_name = parameters["ec2_keypair_name"]
            self.base_bucket = parameters["base_bucket"]
            self.log_dir = parameters["log_dir"]
            self.emr_status_wait = parameters["emr_status_wait"]
            self.step_status_wait = parameters["step_status_wait"]
            self.emr_cluster_name = parameters["emr_cluster_name"]
        except:
            logging.error("Something went wrong initializing EmrManager")
            sys.exit()

        # Establishing EmrConnection
        self.connection = EmrConnection(self.access_key, self.secret_key,
                             region=RegionInfo(name=self.region_name,
                             endpoint=self.region_name + '.elasticmapreduce.amazonaws.com'))

        self.log_bucket_name = self.base_bucket + self.log_dir
 
    #Method for launching the EMR cluster
    def launch_cluster(self, master_type, slave_type, num_instances, ami_version):
        try:
            #Launching the cluster
            cluster_id = self.connection.run_jobflow(
                             self.emr_cluster_name,
                             self.log_bucket_name,
                             ec2_keyname=self.ec2_keypair_name,
                             keep_alive=True,
                             action_on_failure = 'CANCEL_AND_WAIT',
                             master_instance_type=master_type,
                             slave_instance_type=slave_type,
                             num_instances=num_instances,
                             ami_version=ami_version)

            logging.info("Launching cluster: " + cluster_id + ". Please be patient. Check the status of your cluster in your AWS Console")

            # Checking the state of EMR cluster
            state = self.connection.describe_jobflow(cluster_id).state
            while state != u'COMPLETED' and state != u'SHUTTING_DOWN' and state != u'FAILED' and state != u'WAITING':
                #sleeping to recheck for status.
                time.sleep(int(self.emr_status_wait))
                state = self.connection.describe_jobflow(cluster_id).state
                logging.info("Creating cluster " + cluster_id + ". Status: " + state)
 
            if state == u'SHUTTING_DOWN' or state == u'FAILED':
                logging.error("Launching EMR cluster failed")
                return "ERROR"
 
            #Check if the state is WAITING. Then launch the next steps
            if state == u'WAITING':
                #Finding the master node dns of EMR cluster
                master_dns = self.connection.describe_jobflow(cluster_id).masterpublicdnsname
                logging.info("Launched EMR Cluster Successfully with cluster id:" + cluster_id)
                logging.info("Master node DNS of EMR " + master_dns)
                return cluster_id
        except:
            logging.error("Launching EMR cluster failed")
            return "FAILED"

    # run scripting step in cluster
    def run_scripting_step(self, cluster_id, name, script_path):
        try:
            step = ScriptRunnerStep(name=name, 
                                    step_args=[script_path],
                                    action_on_failure="CONTINUE")
            return self._run_step(cluster_id, step)
        except:
            logging.error("Running scripting step in cluster " + cluster_id + " failed.")
            return "FAILED"

    # run streaming step in cluster
    def run_streaming_step(self, cluster_id, name, mapper_path, reducer_path, input_path, output_path):
        try:
            # bundle files with the job
            files = []
            if mapper_path != "NONE":
                files.append(mapper_path)
                mapper_path = mapper_path.split("/")[-1]
            if reducer_path != "NONE":
                files.append(reducer_path)
                reducer_path = reducer_path.split("/")[-1]
            # build streaming step
            logging.debug("Launching streaming step with mapper: " + mapper_path + " reducer: " + reducer_path + " and files: " + str(files))
            step = StreamingStep(name=name,
                                    step_args=["-files"] + files, 
                                    mapper=mapper_path, 
                                    reducer=reducer_path, 
                                    input=input_path, 
                                    output=output_path, 
                                    action_on_failure="CONTINUE")
            return self._run_step(cluster_id, step)            
        except:
            logging.error("Running streaming step in cluster " + cluster_id + " failed.")
            return "FAILED"

    # run mapreduce jar step in cluster
    def run_jar_step(self, cluster_id, name, jar_path, class_name, input_path, output_path):
        try:
            # build streaming step
            logging.debug("Launching jar step with jar: " + jar_path + " class name: " + class_name + " input: " + input_path + " and output: " + output_path)
            step = JarStep(name=name,
                            jar=jar_path, 
                            step_args= [class_name,
                                        input_path,
                                        output_path])
            return self._run_step(cluster_id, step)            
        except:
            logging.error("Running jar step in cluster " + cluster_id + " failed.")
            return "FAILED"

    def _run_step(self, cluster_id, step):
        step_list = self.connection.add_jobflow_steps(cluster_id, [step])
        step_id = step_list.stepids[0].value

        logging.info("Starting step " + step_id + " in cluster " + cluster_id + ". Please be patient. Check the progress of the job in your AWS Console")

        # Checking the state of the step
        state = self._find_step_state(cluster_id, step_id)
        while state != u'NOT_FOUND' and state != u'ERROR' and state != u'FAILED' and state!=u'COMPLETED':
            #sleeping to recheck for status.
            time.sleep(int(self.step_status_wait))
            state = self._find_step_state(cluster_id, step_id)
            logging.info("Starting step " + step_id + " in cluster " + cluster_id + ". Status: " + state)

        if state == u'FAILED':
            logging.error("Step " + step_id + " failed in cluster: " + cluster_id)
            return "FAILED"
        if state == u'NOT_FOUND':
            logging.error("Step " + step_id + " could not be found in cluster: " + cluster_id)
            return "NOT_FOUND"
        if state == u'ERROR':
            logging.error("Step " + step_id + " produced an error in _find_step_state in cluster: " + cluster_id)
            return "ERROR"

        # The step completed successfully; return its id
        if state == u'COMPLETED':
            logging.info("Step " + step_id + " successfully completed in cluster: " + cluster_id)
            return step_id


    def _find_step_state(self, cluster_id, step_id):
        try:
            step_summary_list = self.connection.list_steps(cluster_id)
            for step_summary in step_summary_list.steps:
                if step_summary.id == step_id:
                    return step_summary.status.state
            return "NOT_FOUND"
        except:
            return "ERROR"

    #Method for terminating the EMR cluster
    def terminate_cluster(self, cluster_id):
        self.connection.terminate_jobflow(cluster_id)
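
A rough usage sketch for the manager above; every parameter value, path, and name below is an illustrative assumption rather than something taken from the original:

params = {
    "region_name": "us-east-1",
    "access_key": ACCESS_KEY,              # placeholder credentials
    "secret_key": SECRET_KEY,
    "ec2_keypair_name": "my-keypair",
    "base_bucket": "s3://my-bucket/",
    "log_dir": "logs",
    "emr_status_wait": 30,                 # seconds between cluster-status polls
    "step_status_wait": 30,                # seconds between step-status polls
    "emr_cluster_name": "example-cluster",
}

manager = EmrManager(params)
cluster_id = manager.launch_cluster("m1.medium", "m1.medium", 3, "3.11.0")
if cluster_id not in ("ERROR", "FAILED"):
    manager.run_streaming_step(cluster_id, "wordcount",
                               "s3://my-bucket/code/mapper.py",
                               "s3://my-bucket/code/reducer.py",
                               "s3://my-bucket/input/",
                               "s3://my-bucket/output/")
    manager.terminate_cluster(cluster_id)
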
Example #5
class EMRCluster(object):
    '''Representation of an EMR cluster.
    TODO: add bridge to boto interface for unit test.
    '''
    emr_status_delay = 10  # in sec
    emr_status_max_delay = 60  # in sec
    emr_status_max_error = 30  # number of errors
    emr_max_idle = 10 * 60  # 10 min (in sec)
    rate_limit_lock = RateLimitLock()

    def __init__(self, prop):
        '''Constructor, initialize EMR connection.'''
        self.prop = prop
        self.conn = EmrConnection(self.prop.ec2.key, self.prop.ec2.secret)
        self.jobid = None
        self.retry = 0
        self.level = 0
        self.last_update = -1

    @property
    def priority(self):
        '''The priority used in EMRManager.
        The lower the value, the higher the priority.
        '''
        with EMRCluster.rate_limit_lock:
            if self.jobid is None:
                return 1
            return 0

    def get_instance_groups(self):
        '''Get instance groups to start a cluster.
        It calculates the bid price from self.level, which indicates how
        far the price has been upgraded from the original price.
        '''
        instance_groups = []
        for group in self.prop.emr.instance_groups:
            (num, group_name, instance_type) = group
            level = max(0,
                        min(self.level,
                            len(self.prop.emr.price_upgrade_rate) -
                            1))  # 0 <= level < len(...)
            bprice = self.prop.emr.prices[
                instance_type] * self.prop.emr.price_upgrade_rate[level]
            name = '%s-%s@%f' % (group_name, 'SPOT', bprice)

            # Use on-demand instance if prices are zero.
            if bprice > 0:
                ig = InstanceGroup(num, group_name, instance_type, 'SPOT',
                                   name, '%.3f' % bprice)
            else:
                ig = InstanceGroup(num, group_name, instance_type, 'ON_DEMAND',
                                   name)

            instance_groups.append(ig)

        return instance_groups

    def get_bootstrap_actions(self):
        '''Get list of bootstrap actions from property'''
        actions = []
        for bootstrap_action in self.prop.emr.bootstrap_actions:
            assert len(bootstrap_action
                       ) >= 2, 'Wrong bootstrap action definition: ' + str(
                           bootstrap_action)
            actions.append(
                BootstrapAction(bootstrap_action[0], bootstrap_action[1],
                                bootstrap_action[2:]))
        return actions

    @synchronized
    def start(self):
        '''Start an EMR cluster.'''
        # emr.project_name is required
        if self.prop.emr.project_name is None:
            raise ValueError('emr.project_name is not set')

        self.last_update = time.time()
        with EMRCluster.rate_limit_lock:
            self.jobid = self.conn.run_jobflow(
                name=self.prop.emr.cluster_name,
                ec2_keyname=self.prop.emr.keyname,
                log_uri=self.prop.emr.log_uri,
                ami_version=self.prop.emr.ami_version,
                bootstrap_actions=self.get_bootstrap_actions(),
                keep_alive=True,
                action_on_failure='CONTINUE',
                api_params={'VisibleToAllUsers': 'true'},
                instance_groups=self.get_instance_groups())
        message('Job flow created: %s', self.jobid)

        # Tag EC2 instances to allow future analysis
        tags = {
            'FlowControl': 'Briefly',
            'Project': self.prop.emr.project_name
        }
        if self.prop.emr.tags is not None:
            assert isinstance(self.prop.emr.tags, dict)
            tags = dict(tags.items() + self.prop.emr.tags.items())
        self.conn.add_tags(self.jobid, tags)

    @synchronized
    def terminate(self, level_upgrade=0):
        '''Terminate this EMR cluster.'''
        if self.jobid is None:
            return

        self.level += level_upgrade  # upgrade to another price level

        message('Terminate jobflow: %s', self.jobid)
        for i in range(3):
            try:
                with EMRCluster.rate_limit_lock:
                    self.conn.terminate_jobflow(self.jobid)
                break
            except Exception as e:
                message('Unable to terminate job flow: %s', self.jobid)
                message(traceback.format_exc())
        # We have to set jobid as None to create new cluster;
        # otherwise, run_steps will keep launching jobs on the bad cluster.
        self.jobid = None

    def is_idle(self):
        '''Check whether this EMR cluster is idle.'''
        return (self.jobid is not None) and (
            (time.time() - self.last_update) > self.emr_max_idle)

    def get_steps(self, node):
        '''Get the jar step from the node.'''
        step = JarStep(name=node.config.sub(node.config.emr.step_name,
                                            node_hash=node.hash()),
                       main_class=node.config.main_class,
                       jar=node.config.hadoop.jar,
                       action_on_failure='CONTINUE',
                       step_args=node.process_args(*node.config.args))
        return [step]

    def get_step_index(self, step_id):
        '''Get the index of a step given step_id (1-based).'''
        steps = [
            step.id
            for step in reversed(self.conn.list_steps(self.jobid).steps)
            if step.status is not None
        ]

        # revert the index since latest step is on top of the list
        return steps.index(step_id) + 1

    def run_steps(self, node, wait=True):
        '''Main loop to execute a node.
        It will block until the step completes or fails, and will raise
        an exception on failure so that the step will be retried.
        TODO: add timeouts for each step?
        TODO: dynamically increase cluster size?
        '''
        if not self.jobid:
            self.start()

        try:
            with EMRCluster.rate_limit_lock:
                # Here we just add a single step and get the step_id for the following checks.
                step_id = self.conn.add_jobflow_steps(
                    self.jobid, self.get_steps(node)).stepids[0].value
                assert step_id is not None
        except Exception as e:
            node.log('Unable to add jobflow steps: %s', node.hash())
            node.log('%s', traceback.format_exc())
            raise HadoopFailure()

        status_error_counter = 0
        step_status = 'PENDING'
        step_index = None
        step_start = time.time()

        # notify the node with status.
        node.notify_status('Running on EMR: %s', self.jobid)

        while wait and step_status in ['PENDING', 'RUNNING']:
            try:
                # Wait first for the status to turn to 'RUNNING'. Use an
                # exponential delay on errors, capped at a predefined limit.
                delay = min(self.emr_status_delay * (2**status_error_counter),
                            self.emr_status_max_delay)
                time.sleep(delay)

                # Keep current cluster alive.
                self.last_update = time.time()

                # Get current cluster status. May raise exception due to EMR request throttle.
                cluster_state = self.conn.describe_cluster(
                    self.jobid).status.state

                if step_index is None:
                    step_index = self.get_step_index(step_id)
                    node.log('Step #: %d', step_index)
                    node.log('Log URI: %s/%s/steps/%d/',
                             node.config.emr.log_uri, self.jobid, step_index)

                step_status = self.conn.describe_step(self.jobid,
                                                      step_id).status.state
                status_error_counter = 0  # reset counter
                node.log("%s: %s %s", self.jobid, cluster_state, step_status)

                if cluster_state in [
                        'TERMINATING', 'TERMINATED', 'TERMINATED_WITH_ERRORS'
                ]:  # cluster kill (maybe due to spot price), upgrade.
                    self.terminate(1)
                    break

                # Step running too long? EMR cluster idle.
                if (time.time() - step_start) > node.config.emr.step_timeout:
                    node.log('Step running too long. Restart with new cluster')
                    self.terminate()
                    break

            except KeyboardInterrupt:
                raise
            except Exception as e:
                node.log('EMR loop exception: %d error(s)',
                         status_error_counter)
                status_error_counter += 1
                if status_error_counter > self.emr_status_max_error:
                    self.terminate()
                    node.log('Too many errors in EMR loop')
                    node.log('Exception: %s', traceback.format_exc())
                    raise

        if step_status != 'COMPLETED':
            raise HadoopFailure()
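
The polling loop in run_steps() backs off exponentially when status requests fail, using emr_status_delay as the base and emr_status_max_delay as the cap. With the class defaults (10 and 60 seconds), the successive delays after k consecutive errors are:

# delay(k) = min(emr_status_delay * 2**k, emr_status_max_delay)
delays = [min(10 * (2 ** k), 60) for k in range(6)]
print delays   # [10, 20, 40, 60, 60, 60]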