Example #1
 def get_steps(self, node):
     '''Get the jar step from the node.'''
     step = JarStep(name=node.config.sub(node.config.emr.step_name,
                                         node_hash=node.hash()),
                    main_class=node.config.main_class,
                    jar=node.config.hadoop.jar,
                    action_on_failure='CONTINUE',
                    step_args=node.process_args(*node.config.args))
     return [step]
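A minimal sketch of how a step list like the one returned above might be submitted with boto 2; the region, jobflow_id, scheduler and node values are hypothetical placeholders, not part of this example.

import boto.emr

# Connect to the region the cluster runs in (placeholder region).
conn = boto.emr.connect_to_region('us-east-1')
jobflow_id = 'j-XXXXXXXXXXXXX'  # placeholder id of a running job flow
# get_steps() returns a single-element list of JarStep objects.
steps = scheduler.get_steps(node)
# Queue the jar step on the already-running job flow.
conn.add_jobflow_steps(jobflow_id, steps)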
Example #2
def run_emr(args):
    validate_input_path(args.enriched_archive)
    if args.since is not None:
        validate_since(args.since)
        since_arg = ["--since", args.since]
    else:
        since_arg = []

    c = boto.connect_s3(profile_name=args.profile)
    jar_bucket = c.get_bucket(args.enriched_archive.split("/")[2])
    r = get_valid_region(jar_bucket.get_location())

    if args.jar is None:
        path = "s3://snowplow-hosted-assets/5-data-modeling/event-manifest-populator/" + JAR_FILE
    else:
        path = args.jar

    step_args = [
        "spark-submit",
        "--deploy-mode",
        "cluster",
        "--class",
        "com.snowplowanalytics.snowplow.eventpopulator.Main",
        path,
        "--enriched-archive",
        args.enriched_archive,
        "--storage-config",
        base64encode(args.storage_config),
        "--resolver",
        base64encode(args.resolver),
    ] + since_arg

    steps = [
        JarStep("Run Event Manifest Populator Spark job",
                "command-runner.jar",
                step_args=step_args)
    ]

    conn = boto.emr.connect_to_region(r, profile_name=args.profile)
    job_id = conn.run_jobflow(name="Snowplow Event Manifest Populator",
                              log_uri=args.log_path,
                              ec2_keyname=args.ec2_keyname,
                              master_instance_type="m3.xlarge",
                              slave_instance_type="m3.xlarge",
                              num_instances=3,
                              enable_debugging=True,
                              steps=steps,
                              job_flow_role="EMR_EC2_DefaultRole",
                              service_role="EMR_DefaultRole",
                              visible_to_all_users=True,
                              api_params={
                                  'ReleaseLabel': 'emr-5.4.0',
                                  'Applications.member.1.Name': 'Spark',
                                  'Applications.member.2.Name': 'Hadoop',
                              })
    print("Started jobflow " + job_id)
Example #3
    def execute(self, jar_path, args):
        from boto.emr.step import JarStep

        s3_jar_path = s3_upload(self.s3_bucket, jar_path, self.get_s3_working_dir(jar_path))
        print("Uploading jar to s3 : %s -> %s" % (jar_path, s3_jar_path))

        print("Add jobflow step")
        step = JarStep(name=self.get_emr_job_name(), jar=s3_jar_path, step_args=args)
        ret_steps = self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[step])

        print("Waiting jobflow steps...")
        return emr_wait_steps(self.emr_conn, self.job_flow_id, ret_steps)
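emr_wait_steps() is a project-specific helper that is not shown here. A plausible stand-in under boto 2 would poll the job flow with describe_jobflow() until none of its steps are pending or running; the sketch below is illustrative only, not the original helper.

import time

def emr_wait_steps(emr_conn, job_flow_id, ret_steps, poll_seconds=30):
    # Illustrative stand-in: ignore ret_steps and poll every step on the
    # job flow until none of them is PENDING or RUNNING any more.
    while True:
        flow = emr_conn.describe_jobflow(job_flow_id)
        states = [(step.name, step.state) for step in flow.steps]
        if all(state not in ('PENDING', 'RUNNING') for _, state in states):
            return states
        time.sleep(poll_seconds)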
Example #4
    def execute(self, jar_path, args):
        from boto.emr.step import JarStep

        s3_jar_path = s3_upload(self.s3_bucket, jar_path, self.get_s3_working_dir(jar_path))
        # s3_jar_path = "s3://run-jars/jar/mahout-core-1.0-SNAPSHOT-job.jar"
        print("Uploading jar to s3 : %s -> %s" % (jar_path, s3_jar_path))

        print("Add jobflow step")
        step = JarStep(name='cl_filter', jar=s3_jar_path, step_args=args)
        self.emr_conn.add_jobflow_steps(self.job_flow_id, steps=[step])

        print("Waiting jobflow step done")
        emr_wait_job(self.emr_conn, self.job_flow_id)
Example #5
 def run_jar_step(self, cluster_id, name, jar_path, class_name, input_path, output_path):
     try:
         # build the jar step
         logging.debug("Launching jar step with jar: " + jar_path + " class name: " + class_name + " input: " + input_path + " and output: " + output_path)
         step = JarStep(name=name,
                        jar=jar_path,
                        step_args=[class_name,
                                   input_path,
                                   output_path])
         return self._run_step(cluster_id, step)            
     except:
         logging.error("Running jar step in cluster " + cluster_id + " failed.")
         return "FAILED"
Example #6
 def emr_execute_jar(self,
                     job_name,
                     s3_jar_path,
                     jar_args,
                     main_class="",
                     action_on_failure='CONTINUE'):
     steps = [
         JarStep(name=job_name,
                 jar=s3_jar_path,
                 main_class=main_class,
                 step_args=jar_args,
                 action_on_failure=action_on_failure)
     ]
     ret_steps = self.emr_conn.add_jobflow_steps(self.jobflow_id,
                                                 steps=steps)
     step_ids = [s.value for s in ret_steps.stepids]
     return step_ids
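A hedged usage sketch of the wrapper above; runner stands for an instance of the surrounding class, and the jar path, class name and arguments are placeholders.

step_ids = runner.emr_execute_jar(
    job_name="word-count",
    s3_jar_path="s3://my-bucket/jars/wordcount.jar",
    jar_args=["s3://my-bucket/input/", "s3://my-bucket/output/"],
    main_class="com.example.WordCount",
    action_on_failure="CANCEL_AND_WAIT")
print("Submitted step ids: %s" % step_ids)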
Example #7
    def run_jobflow(self,
                    name,
                    log_uri=None,
                    ec2_keyname=None,
                    availability_zone=None,
                    master_instance_type='m1.small',
                    slave_instance_type='m1.small',
                    num_instances=1,
                    action_on_failure='TERMINATE_JOB_FLOW',
                    keep_alive=False,
                    enable_debugging=False,
                    hadoop_version=None,
                    steps=[],
                    bootstrap_actions=[],
                    instance_groups=None,
                    additional_info=None,
                    ami_version=None,
                    api_params=None,
                    visible_to_all_users=None,
                    job_flow_role=None):
        """
        Runs a job flow
        :type name: str
        :param name: Name of the job flow

        :type log_uri: str
        :param log_uri: URI of the S3 bucket to place logs

        :type ec2_keyname: str
        :param ec2_keyname: EC2 key used for the instances

        :type availability_zone: str
        :param availability_zone: EC2 availability zone of the cluster

        :type master_instance_type: str
        :param master_instance_type: EC2 instance type of the master

        :type slave_instance_type: str
        :param slave_instance_type: EC2 instance type of the slave nodes

        :type num_instances: int
        :param num_instances: Number of instances in the Hadoop cluster

        :type action_on_failure: str
        :param action_on_failure: Action to take if a step terminates

        :type keep_alive: bool
        :param keep_alive: Denotes whether the cluster should stay
            alive upon completion

        :type enable_debugging: bool
        :param enable_debugging: Denotes whether AWS console debugging
            should be enabled.

        :type hadoop_version: str
        :param hadoop_version: Version of Hadoop to use. This no longer
            defaults to '0.20' and now uses the AMI default.

        :type steps: list(boto.emr.Step)
        :param steps: List of steps to add with the job

        :type bootstrap_actions: list(boto.emr.BootstrapAction)
        :param bootstrap_actions: List of bootstrap actions that run
            before Hadoop starts.

        :type instance_groups: list(boto.emr.InstanceGroup)
        :param instance_groups: Optional list of instance groups to
            use when creating this job.
            NB: When provided, this argument supersedes num_instances
            and master/slave_instance_type.

        :type ami_version: str
        :param ami_version: Amazon Machine Image (AMI) version to use
            for instances. Values accepted by EMR are '1.0', '2.0', and
            'latest'; EMR currently defaults to '1.0' if you don't set
            'ami_version'.

        :type additional_info: JSON str
        :param additional_info: A JSON string for selecting additional features

        :type api_params: dict
        :param api_params: a dictionary of additional parameters to pass
            directly to the EMR API (so you don't have to upgrade boto to
            use new EMR features). You can also delete an API parameter
            by setting it to None.

        :type visible_to_all_users: bool
        :param visible_to_all_users: Whether the job flow is visible to all IAM
            users of the AWS account associated with the job flow. If this
            value is set to ``True``, all IAM users of that AWS
            account can view and (if they have the proper policy permissions
            set) manage the job flow. If it is set to ``False``, only
            the IAM user that created the job flow can view and manage
            it.

        :type job_flow_role: str
        :param job_flow_role: An IAM role for the job flow. The EC2
            instances of the job flow assume this role. The default role is
            ``EMRJobflowDefault``. In order to use the default role,
            you must have already created it using the CLI.

        :rtype: str
        :return: The jobflow id
        """
        params = {}
        if action_on_failure:
            params['ActionOnFailure'] = action_on_failure
        if log_uri:
            params['LogUri'] = log_uri
        params['Name'] = name

        # Common instance args
        common_params = self._build_instance_common_args(
            ec2_keyname, availability_zone, keep_alive, hadoop_version)
        params.update(common_params)

        # NB: according to the AWS API's error message, we must
        # "configure instances either using instance count, master and
        # slave instance type or instance groups but not both."
        #
        # Thus we switch here on the truthiness of instance_groups.
        if not instance_groups:
            # Instance args (the common case)
            instance_params = self._build_instance_count_and_type_args(
                master_instance_type, slave_instance_type, num_instances)
            params.update(instance_params)
        else:
            # Instance group args (for spot instances or a heterogeneous cluster)
            list_args = self._build_instance_group_list_args(instance_groups)
            instance_params = dict(
                ('Instances.%s' % k, v) for k, v in list_args.iteritems())
            params.update(instance_params)

        # Debugging step from EMR API docs
        if enable_debugging:
            debugging_step = JarStep(name='Setup Hadoop Debugging',
                                     action_on_failure='TERMINATE_JOB_FLOW',
                                     main_class=None,
                                     jar=self.DebuggingJar,
                                     step_args=self.DebuggingArgs)
            steps.insert(0, debugging_step)

        # Step args
        if steps:
            step_args = [self._build_step_args(step) for step in steps]
            params.update(self._build_step_list(step_args))

        if bootstrap_actions:
            bootstrap_action_args = [
                self._build_bootstrap_action_args(bootstrap_action)
                for bootstrap_action in bootstrap_actions
            ]
            params.update(
                self._build_bootstrap_action_list(bootstrap_action_args))

        if ami_version:
            params['AmiVersion'] = ami_version

        if additional_info is not None:
            params['AdditionalInfo'] = additional_info

        if api_params:
            for key, value in api_params.iteritems():
                if value is None:
                    params.pop(key, None)
                else:
                    params[key] = value

        if visible_to_all_users is not None:
            if visible_to_all_users:
                params['VisibleToAllUsers'] = 'true'
            else:
                params['VisibleToAllUsers'] = 'false'

        if job_flow_role is not None:
            params['JobFlowRole'] = job_flow_role

        response = self.get_object('RunJobFlow',
                                   params,
                                   RunJobFlowResponse,
                                   verb='POST')
        return response.jobflowid
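A hedged usage sketch of run_jobflow() as defined above, submitting one JarStep through an EmrConnection obtained from boto.emr.connect_to_region(); the bucket, jar, class, key pair and AMI choices are placeholders.

import boto.emr
from boto.emr.step import JarStep

conn = boto.emr.connect_to_region('us-east-1')
wordcount = JarStep(name='Word count',
                    jar='s3://my-bucket/jars/wordcount.jar',
                    main_class='com.example.WordCount',
                    action_on_failure='TERMINATE_JOB_FLOW',
                    step_args=['s3://my-bucket/input/', 's3://my-bucket/output/'])
jobflow_id = conn.run_jobflow(name='Example word count',
                              log_uri='s3://my-bucket/logs/',
                              ec2_keyname='my-keypair',
                              master_instance_type='m1.small',
                              slave_instance_type='m1.small',
                              num_instances=3,
                              enable_debugging=True,
                              ami_version='2.4.2',
                              steps=[wordcount])
print("Started jobflow " + jobflow_id)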
Example #8
    def run_jobflow(self,
                    name, log_uri, ec2_keyname=None, availability_zone=None,
                    master_instance_type='m1.small',
                    slave_instance_type='m1.small', num_instances=1,
                    action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
                    enable_debugging=False,
                    hadoop_version=None,
                    steps=None,
                    bootstrap_actions=[],
                    instance_groups=None,
                    additional_info=None,
                    ami_version=None,
                    now=None,
                    api_params=None):
        """Mock of run_jobflow().

        If you set log_uri to None, you can get a jobflow with no loguri
        attribute, which is useful for testing.
        """
        self._enforce_strict_ssl()

        if now is None:
            now = datetime.utcnow()

        # default and validate Hadoop and AMI versions

        # if nothing specified, use 0.20 for backwards compatibility
        if ami_version is None and hadoop_version is None:
            hadoop_version = '0.20'

        # check if AMI version is valid
        if ami_version not in AMI_VERSION_TO_HADOOP_VERSIONS:
            raise boto.exception.EmrResponseError(400, 'Bad Request')

        available_hadoop_versions = AMI_VERSION_TO_HADOOP_VERSIONS[ami_version]

        if hadoop_version is None:
            hadoop_version = available_hadoop_versions[0]
        elif hadoop_version not in available_hadoop_versions:
            raise boto.exception.EmrResponseError(400, 'Bad Request')

        # create a MockEmrObject corresponding to the job flow. We only
        # need to fill in the fields that EMRJobRunner uses
        steps = steps or []

        jobflow_id = 'j-MOCKJOBFLOW%d' % len(self.mock_emr_job_flows)
        assert jobflow_id not in self.mock_emr_job_flows

        def make_fake_action(real_action):
            return MockEmrObject(name=real_action.name,
                                 path=real_action.path,
                                 args=[MockEmrObject(value=str(v)) for v \
                                       in real_action.bootstrap_action_args])

        # create a MockEmrObject corresponding to the job flow. We only
        # need to fill in the fields that EMRJobRunner uses
        if not instance_groups:
            mock_groups = [
                MockEmrObject(
                    instancerequestcount='1',
                    instancerole='MASTER',
                    instancerunningcount='0',
                    instancetype=master_instance_type,
                    market='ON_DEMAND',
                    name='master',
                ),
            ]
            if num_instances > 1:
                mock_groups.append(
                    MockEmrObject(
                        instancerequestcount=str(num_instances - 1),
                        instancerole='CORE',
                        instancerunningcount='0',
                        instancetype=slave_instance_type,
                        market='ON_DEMAND',
                        name='core',
                    ),
                )
            else:
                # don't display slave instance type if there are no slaves
                slave_instance_type = None
        else:
            slave_instance_type = None
            num_instances = 0

            mock_groups = []
            roles = set()

            for instance_group in instance_groups:
                if instance_group.num_instances < 1:
                    raise boto.exception.EmrResponseError(
                        400, 'Bad Request', body=err_xml(
                        'An instance group must have at least one instance'))

                emr_group = MockEmrObject(
                    instancerequestcount=str(instance_group.num_instances),
                    instancerole=instance_group.role,
                    instancerunningcount='0',
                    instancetype=instance_group.type,
                    market=instance_group.market,
                    name=instance_group.name,
                )
                if instance_group.market == 'SPOT':
                    bid_price = instance_group.bidprice

                    # simulate EMR's bid price validation
                    try:
                        float(bid_price)
                    except (TypeError, ValueError):
                        raise boto.exception.EmrResponseError(
                            400, 'Bad Request', body=err_xml(
                            'The bid price supplied for an instance group is'
                            ' invalid'))

                    if ('.' in bid_price and
                        len(bid_price.split('.', 1)[1]) > 3):
                        raise boto.exception.EmrResponseError(
                            400, 'Bad Request', body=err_xml(
                            'No more than 3 digits are allowed after decimal'
                            ' place in bid price'))

                    emr_group.bidprice = bid_price

                if instance_group.role in roles:
                    role_desc = instance_group.role.lower()
                    raise boto.exception.EmrResponseError(
                        400, 'Bad Request', body=err_xml(
                        'Multiple %s instance groups supplied, you'
                        ' must specify exactly one %s instance group' %
                        (role_desc, role_desc)))

                if instance_group.role == 'MASTER':
                    if instance_group.num_instances != 1:
                        raise boto.exception.EmrResponseError(
                            400, 'Bad Request', body=err_xml(
                            'A master instance group must specify a single'
                            ' instance'))

                    master_instance_type = instance_group.type

                elif instance_group.role == 'CORE':
                    slave_instance_type = instance_group.type
                mock_groups.append(emr_group)
                num_instances += instance_group.num_instances
                roles.add(instance_group.role)

            if 'TASK' in roles and 'CORE' not in roles:
                raise boto.exception.EmrResponseError(
                    400, 'Bad Request', body=err_xml(
                    'Clusters with task nodes must also define core'
                    ' nodes.'))

            if 'MASTER' not in roles:
                raise boto.exception.EmrResponseError(
                    400, 'Bad Request', body=err_xml(
                    'Zero master instance groups supplied, you must'
                    ' specify exactly one master instance group'))

        job_flow = MockEmrObject(
            availabilityzone=availability_zone,
            bootstrapactions=[make_fake_action(a) for a in bootstrap_actions],
            creationdatetime=to_iso8601(now),
            ec2keyname=ec2_keyname,
            hadoopversion=hadoop_version,
            instancecount=str(num_instances),
            instancegroups=mock_groups,
            jobflowid=jobflow_id,
            keepjobflowalivewhennosteps=('true' if keep_alive else 'false'),
            laststatechangereason='Provisioning Amazon EC2 capacity',
            masterinstancetype=master_instance_type,
            masterpublicdnsname='mockmaster',
            name=name,
            normalizedinstancehours='9999',  # just need this filled in for now
            state='STARTING',
            steps=[],
            api_params={},
            visibletoallusers='false',  # can only be set with api_params
        )

        if slave_instance_type is not None:
            job_flow.slaveinstancetype = slave_instance_type

        # AMI version is only set when you specify it explicitly
        if ami_version is not None:
            job_flow.amiversion = ami_version

        # don't always set loguri, so we can test Issue #112
        if log_uri is not None:
            job_flow.loguri = log_uri

        # include raw api params in job flow object
        if api_params:
            job_flow.api_params = api_params
            if 'VisibleToAllUsers' in api_params:
                job_flow.visibletoallusers = api_params['VisibleToAllUsers']

        self.mock_emr_job_flows[jobflow_id] = job_flow

        if enable_debugging:
            debugging_step = JarStep(name='Setup Hadoop Debugging',
                                     action_on_failure='TERMINATE_JOB_FLOW',
                                     main_class=None,
                                     jar=EmrConnection.DebuggingJar,
                                     step_args=EmrConnection.DebuggingArgs)
            steps.insert(0, debugging_step)
        self.add_jobflow_steps(jobflow_id, steps)

        return jobflow_id
Example #9
    def run_jobflow(self, name, log_uri, ec2_keyname=None, availability_zone=None,
                    master_instance_type='m1.small',
                    slave_instance_type='m1.small', num_instances=1,
                    action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
                    enable_debugging=False,
                    hadoop_version='0.18',
                    steps=[],
                    bootstrap_actions=[]):
        """
        Runs a job flow

        :type name: str
        :param name: Name of the job flow
        :type log_uri: str
        :param log_uri: URI of the S3 bucket to place logs
        :type ec2_keyname: str
        :param ec2_keyname: EC2 key used for the instances
        :type availability_zone: str
        :param availability_zone: EC2 availability zone of the cluster
        :type master_instance_type: str
        :param master_instance_type: EC2 instance type of the master
        :type slave_instance_type: str
        :param slave_instance_type: EC2 instance type of the slave nodes
        :type num_instances: int
        :param num_instances: Number of instances in the Hadoop cluster
        :type action_on_failure: str
        :param action_on_failure: Action to take if a step terminates
        :type keep_alive: bool
        :param keep_alive: Denotes whether the cluster should stay alive upon completion
        :type enable_debugging: bool
        :param enable_debugging: Denotes whether AWS console debugging should be enabled.
        :type steps: list(boto.emr.Step)
        :param steps: List of steps to add with the job

        :rtype: str
        :return: The jobflow id
        """
        params = {}
        if action_on_failure:
            params['ActionOnFailure'] = action_on_failure
        params['Name'] = name
        params['LogUri'] = log_uri

        # Instance args
        instance_params = self._build_instance_args(ec2_keyname, availability_zone,
                                                    master_instance_type, slave_instance_type,
                                                    num_instances, keep_alive, hadoop_version)
        params.update(instance_params)

        # Debugging step from EMR API docs
        if enable_debugging:
            debugging_step = JarStep(name='Setup Hadoop Debugging',
                                     action_on_failure='TERMINATE_JOB_FLOW',
                                     main_class=None,
                                     jar=self.DebuggingJar,
                                     step_args=self.DebuggingArgs)
            steps.insert(0, debugging_step)

        # Step args
        if steps:
            step_args = [self._build_step_args(step) for step in steps]
            params.update(self._build_step_list(step_args))

        if bootstrap_actions:
            bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action) for bootstrap_action in bootstrap_actions]
            params.update(self._build_bootstrap_action_list(bootstrap_action_args))

        response = self.get_object('RunJobFlow', params, RunJobFlowResponse)
        return response.jobflowid
Example #10
	if not params['spot_bid_price']:
		print '\nERROR:You must specify a spot bid price to use spot instances!'
		usage()
	
	spot_instance_group = InstanceGroup(params['num_spot'],"TASK","c1.xlarge","SPOT","INITIAL_TASK_GROUP",params['spot_bid_price'])
		
	instance_groups=[namenode_instance_group,core_instance_group,spot_instance_group]

args = []

if params['test_mode'] == True:
	args.append('--testMode')

step = JarStep(
	name="CCParseJob",
	jar="s3://commoncrawl-public/commoncrawl-0.1.jar",
	main_class="org.commoncrawl.mapred.ec2.parser.EC2Launcher",
	action_on_failure="CANCEL_AND_WAIT",
	step_args=args)
	
print  instance_groups

#	instance_groups=[namenode_instance_group,core_instance_group,spot_instance_group],
jobid = conn.run_jobflow(
	name="EMR Parser JOB", 
	availability_zone="us-east-1d",
    log_uri="s3://" + params['s3_bucket'] + "/logs", 
	ec2_keyname=params['keypair'],
	instance_groups=instance_groups,
	keep_alive=True,
    enable_debugging=True,
	hadoop_version="0.20.205",
Example #11
    def run_jobflow(self,
                    name,
                    log_uri,
                    ec2_keyname=None,
                    availability_zone=None,
                    master_instance_type='m1.small',
                    slave_instance_type='m1.small',
                    num_instances=1,
                    action_on_failure='TERMINATE_JOB_FLOW',
                    keep_alive=False,
                    enable_debugging=False,
                    hadoop_version=None,
                    steps=[],
                    bootstrap_actions=[],
                    instance_groups=None,
                    additional_info=None,
                    ami_version=None):
        """
        Runs a job flow
        :type name: str
        :param name: Name of the job flow
        
        :type log_uri: str
        :param log_uri: URI of the S3 bucket to place logs
        
        :type ec2_keyname: str
        :param ec2_keyname: EC2 key used for the instances
        
        :type availability_zone: str
        :param availability_zone: EC2 availability zone of the cluster
        
        :type master_instance_type: str
        :param master_instance_type: EC2 instance type of the master
        
        :type slave_instance_type: str
        :param slave_instance_type: EC2 instance type of the slave nodes
        
        :type num_instances: int
        :param num_instances: Number of instances in the Hadoop cluster
        
        :type action_on_failure: str
        :param action_on_failure: Action to take if a step terminates
        
        :type keep_alive: bool
        :param keep_alive: Denotes whether the cluster should stay
            alive upon completion
            
        :type enable_debugging: bool
        :param enable_debugging: Denotes whether AWS console debugging
            should be enabled.

        :type hadoop_version: str
        :param hadoop_version: Version of Hadoop to use. If ami_version
            is not set, defaults to '0.20' for backwards compatibility
            with older versions of boto.

        :type steps: list(boto.emr.Step)
        :param steps: List of steps to add with the job
        
        :type bootstrap_actions: list(boto.emr.BootstrapAction)
        :param bootstrap_actions: List of bootstrap actions that run
            before Hadoop starts.
            
        :type instance_groups: list(boto.emr.InstanceGroup)
        :param instance_groups: Optional list of instance groups to
            use when creating this job.
            NB: When provided, this argument supersedes num_instances
                and master/slave_instance_type.
                
        :type ami_version: str
        :param ami_version: Amazon Machine Image (AMI) version to use
            for instances. Values accepted by EMR are '1.0', '2.0', and
            'latest'; EMR currently defaults to '1.0' if you don't set
            'ami_version'.
            
        :type additional_info: JSON str
        :param additional_info: A JSON string for selecting additional features
        
        :rtype: str
        :return: The jobflow id
        """
        # hadoop_version used to default to '0.20', but this won't work
        # on later AMI versions, so only default it when ami_version isn't set.
        if not (hadoop_version or ami_version):
            hadoop_version = '0.20'

        params = {}
        if action_on_failure:
            params['ActionOnFailure'] = action_on_failure
        params['Name'] = name
        params['LogUri'] = log_uri

        # Common instance args
        common_params = self._build_instance_common_args(
            ec2_keyname, availability_zone, keep_alive, hadoop_version)
        params.update(common_params)

        # NB: according to the AWS API's error message, we must
        # "configure instances either using instance count, master and
        # slave instance type or instance groups but not both."
        #
        # Thus we switch here on the truthiness of instance_groups.
        if not instance_groups:
            # Instance args (the common case)
            instance_params = self._build_instance_count_and_type_args(
                master_instance_type, slave_instance_type, num_instances)
            params.update(instance_params)
        else:
            # Instance group args (for spot instances or a heterogeneous cluster)
            list_args = self._build_instance_group_list_args(instance_groups)
            instance_params = dict(
                ('Instances.%s' % k, v) for k, v in list_args.iteritems())
            params.update(instance_params)

        # Debugging step from EMR API docs
        if enable_debugging:
            debugging_step = JarStep(name='Setup Hadoop Debugging',
                                     action_on_failure='TERMINATE_JOB_FLOW',
                                     main_class=None,
                                     jar=self.DebuggingJar,
                                     step_args=self.DebuggingArgs)
            steps.insert(0, debugging_step)

        # Step args
        if steps:
            step_args = [self._build_step_args(step) for step in steps]
            params.update(self._build_step_list(step_args))

        if bootstrap_actions:
            bootstrap_action_args = [
                self._build_bootstrap_action_args(bootstrap_action)
                for bootstrap_action in bootstrap_actions
            ]
            params.update(
                self._build_bootstrap_action_list(bootstrap_action_args))

        if ami_version:
            params['AmiVersion'] = ami_version

        if additional_info is not None:
            params['AdditionalInfo'] = additional_info

        response = self.get_object('RunJobFlow',
                                   params,
                                   RunJobFlowResponse,
                                   verb='POST')
        return response.jobflowid
Example #12
def run_emr(args):
    validate_input_path(args.run_folder)

    if args.time is None:
        time_arg = []
    else:
        try:
            datetime.strptime(args.time, '%Y-%m-%d-%H-%M-%S')
        except ValueError as e:
            print("Invalid time")
            print(e.message)
            sys.exit(1)
        time_arg = ['--time', args.time]

    c = boto.connect_s3(profile_name=args.profile)
    jar_bucket = c.get_bucket(args.run_folder.split("/")[2])
    r = get_valid_region(jar_bucket.get_location())

    if args.jar is None:
        path = "s3://snowplow-hosted-assets/4-storage/event-manifest-cleaner/" + JAR_FILE
    else:
        path = args.jar

    step_args = [
        "spark-submit",
        "--deploy-mode",
        "cluster",

        "--class",
        "com.snowplowanalytics.snowplow.manifestcleaner.Main",

        path,

        "--run-folder",
        args.run_folder,

        "--storage-config",
        base64encode(args.storage_config),

        "--resolver",
        base64encode(args.resolver),
    ] + time_arg

    steps = [
        JarStep("Run Event Manifest Cleaner Spark job", "command-runner.jar", step_args=step_args)
    ]

    conn = boto.emr.connect_to_region(r, profile_name=args.profile)
    job_id = conn.run_jobflow(
        name="Snowplow Event Manifest Cleaner ",
        log_uri=args.log_path,
        ec2_keyname=args.ec2_keyname,
        master_instance_type="m3.xlarge",
        slave_instance_type="m3.xlarge",
        num_instances=2,
        enable_debugging=False,
        steps=steps,
        job_flow_role="EMR_EC2_DefaultRole",
        service_role="EMR_DefaultRole",
        api_params={
            'ReleaseLabel': 'emr-5.4.0',
            'Applications.member.1.Name': 'Spark',
            'Applications.member.2.Name': 'Hadoop',
        }
    )
    print("Started jobflow " + job_id)
Example #13
def create_emr_cluster(cr):
    """
    @PARAM:  Cluster configuration reader object
    Creates an EMR cluster given a set of configuration parameters
    Return:  EMR Cluster ID
    """

    #region = cr.get_config("aws_region")
    #conn = boto.emr.connect_to_region(region)
    conn = EmrConnection(
        cr.get_config("aws_access_key"),
        cr.get_config("aws_secret_key"),
        region=RegionInfo(name=cr.get_config("aws_region"),
                          endpoint=cr.get_config("aws_region") +
                          ".elasticmapreduce.amazonaws.com"))

    #  Create list of instance groups:  master, core, and task
    instance_groups = []
    instance_groups.append(
        InstanceGroup(num_instances=cr.get_config("emr_master_node_count"),
                      role="MASTER",
                      type=cr.get_config("emr_master_node_type"),
                      market=cr.get_config("emr_market_type"),
                      name="Master Node"))

    instance_groups.append(
        InstanceGroup(num_instances=cr.get_config("emr_core_node_count"),
                      role="CORE",
                      type=cr.get_config("emr_core_node_type"),
                      market=cr.get_config("emr_market_type"),
                      name="Core Node"))

    #  Only create task nodes if specifically asked for
    if cr.get_config("emr_task_node_count") > 0:
        instance_groups.append(
            InstanceGroup(num_instances=cr.get_config("emr_task_node_count"),
                          role="TASK",
                          type=cr.get_config("emr_task_node_type"),
                          market=cr.get_config("emr_market_type"),
                          name="Task Node"))

    print "Creating EMR Cluster with instance groups: {0}".format(
        instance_groups)

    #  Use these params to add overrides; these will go away in Boto3
    api_params = {
        "Instances.Ec2SubnetId": cr.get_config("aws_subnet_id"),
        "ReleaseLabel": cr.get_config("emr_version")
    }

    #  Add step to load data
    step_args = [
        "s3-dist-cp", "--s3Endpoint=s3-us-west-1.amazonaws.com",
        "--src=s3://alpine-qa/automation/automation_test_data/",
        "--dest=hdfs:///automation_test_data", "--srcPattern=.*[a-zA-Z,]+"
    ]
    step = JarStep(name="s3distcp for data loading",
                   jar="command-runner.jar",
                   step_args=step_args,
                   action_on_failure="CONTINUE")

    cluster_id = conn.run_jobflow(
        cr.get_config("emr_cluster_name"),
        instance_groups=instance_groups,
        action_on_failure="TERMINATE_JOB_FLOW",
        keep_alive=True,
        enable_debugging=True,
        log_uri=cr.get_config("emr_log_uri"),
        #hadoop_version = "Amazon 2.7.2",
        #ReleaseLabel = "emr-5.0.0",
        #ami_version = "5.0.0",
        steps=[step],
        bootstrap_actions=[],
        ec2_keyname=cr.get_config("ec2_keyname"),
        visible_to_all_users=True,
        job_flow_role="EMR_EC2_DefaultRole",
        service_role="EMR_DefaultRole",
        api_params=api_params)

    print "EMR Cluster created, cluster id: {0}".format(cluster_id)
    state = conn.describe_cluster(cluster_id).status.state
    while state != u'COMPLETED' and state != u'SHUTTING_DOWN' and state != u'FAILED' and state != u'WAITING':
        #sleeping to recheck for status.
        time.sleep(5)
        state = conn.describe_cluster(cluster_id).status.state
        print "State is: {0}, sleeping 5s...".format(state)

    if state == u'SHUTTING_DOWN' or state == u'FAILED':
        return "ERROR"

    #Check if the state is WAITING. Then launch the next steps
    if state == u'WAITING':
        #Finding the master node dns of EMR cluster
        master_dns = conn.describe_cluster(cluster_id).masterpublicdnsname
        print "DNS Name: {0}".format(master_dns)
        return cluster_id
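Because the cluster above is created with keep_alive=True and handed back once it reaches WAITING, further steps can be queued against the returned id on the same connection. The sketch below is a hedged follow-up, not part of the original script; the step name and destination path are placeholders.

# conn is the EmrConnection created inside create_emr_cluster(); reuse it
# (or build an equivalent one) before queuing additional steps.
followup = JarStep(name="s3distcp result export",
                   jar="command-runner.jar",
                   step_args=["s3-dist-cp",
                              "--src=hdfs:///automation_test_data",
                              "--dest=s3://my-bucket/automation_results/"],
                   action_on_failure="CONTINUE")
conn.add_jobflow_steps(cluster_id, steps=[followup])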
Example #14
    def run_jobflow(self,
                    name,
                    log_uri,
                    ec2_keyname=None,
                    availability_zone=None,
                    master_instance_type='m1.small',
                    slave_instance_type='m1.small',
                    num_instances=1,
                    action_on_failure='TERMINATE_JOB_FLOW',
                    keep_alive=False,
                    enable_debugging=False,
                    hadoop_version='0.20',
                    steps=[],
                    bootstrap_actions=[],
                    instance_groups=None):
        """
        Runs a job flow

        :type name: str
        :param name: Name of the job flow
        :type log_uri: str
        :param log_uri: URI of the S3 bucket to place logs
        :type ec2_keyname: str
        :param ec2_keyname: EC2 key used for the instances
        :type availability_zone: str
        :param availability_zone: EC2 availability zone of the cluster
        :type master_instance_type: str
        :param master_instance_type: EC2 instance type of the master
        :type slave_instance_type: str
        :param slave_instance_type: EC2 instance type of the slave nodes
        :type num_instances: int
        :param num_instances: Number of instances in the Hadoop cluster
        :type action_on_failure: str
        :param action_on_failure: Action to take if a step terminates
        :type keep_alive: bool
        :param keep_alive: Denotes whether the cluster should stay alive upon completion
        :type enable_debugging: bool
        :param enable_debugging: Denotes whether AWS console debugging should be enabled.
        :type steps: list(boto.emr.Step)
        :param steps: List of steps to add with the job
        :type instance_groups: list(boto.emr.InstanceGroup)
        :param instance_groups: Optional list of instance groups to use when creating
                      this job. NB: When provided, this argument supersedes
                      num_instances and master/slave_instance_type.
        :rtype: str
        :return: The jobflow id
        """
        params = {}
        if action_on_failure:
            params['ActionOnFailure'] = action_on_failure
        params['Name'] = name
        params['LogUri'] = log_uri

        # Common instance args
        common_params = self._build_instance_common_args(
            ec2_keyname, availability_zone, keep_alive, hadoop_version)
        params.update(common_params)

        # NB: according to the AWS API's error message, we must
        # "configure instances either using instance count, master and
        # slave instance type or instance groups but not both."
        #
        # Thus we switch here on the truthiness of instance_groups.
        if not instance_groups:
            # Instance args (the common case)
            instance_params = self._build_instance_count_and_type_args(
                master_instance_type, slave_instance_type, num_instances)
            params.update(instance_params)
        else:
            # Instance group args (for spot instances or a heterogeneous cluster)
            list_args = self._build_instance_group_list_args(instance_groups)
            instance_params = dict(
                ('Instances.%s' % k, v) for k, v in list_args.iteritems())
            params.update(instance_params)

        # Debugging step from EMR API docs
        if enable_debugging:
            debugging_step = JarStep(name='Setup Hadoop Debugging',
                                     action_on_failure='TERMINATE_JOB_FLOW',
                                     main_class=None,
                                     jar=self.DebuggingJar,
                                     step_args=self.DebuggingArgs)
            steps.insert(0, debugging_step)

        # Step args
        if steps:
            step_args = [self._build_step_args(step) for step in steps]
            params.update(self._build_step_list(step_args))

        if bootstrap_actions:
            bootstrap_action_args = [
                self._build_bootstrap_action_args(bootstrap_action)
                for bootstrap_action in bootstrap_actions
            ]
            params.update(
                self._build_bootstrap_action_list(bootstrap_action_args))

        response = self.get_object('RunJobFlow',
                                   params,
                                   RunJobFlowResponse,
                                   verb='POST')
        return response.jobflowid