Code example #1
0
    def run_job(self, job_execution):
        """Launch an Oozie workflow for the given job execution.

        Stages the job files and generated workflow XML into HDFS, submits
        the workflow to the Oozie server and starts it.

        Returns a (job_id, status, extra) tuple: the Oozie job id and its
        initial status on success (status is None if the status fetch
        fails), or (None, KILLED, None) if the execution was flagged to be
        killed before the job was started.
        """
        ctx = context.ctx()

        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_data_sources(
            job_execution, job)

        # Resolve any data_source name/uuid references in the job configs.
        # With substitution enabled, updated_job_configs is a copy of
        # job_execution.job_configs with references replaced by paths;
        # otherwise it is simply job_execution.job_configs (no copy).
        # additional_sources lists every data_source found while resolving.
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(
                job_execution.job_configs))

        proxy_configs = updated_job_configs.get('proxy_configs')
        configs = updated_job_configs.get('configs', {})

        # A single HDFS data source is enough to require cluster
        # configuration, so only the first one found is used.
        candidates = [input_source, output_source] + additional_sources
        hdfs_source = next(
            (ds for ds in candidates if ds and ds.type == 'hdfs'), None)
        if hdfs_source is not None:
            h.configure_cluster_for_hdfs(self.cluster, hdfs_source)

        hdfs_user = self.get_hdfs_user()

        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)

        # Stage everything the workflow needs into HDFS.
        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
                                       proxy_configs)

        workflow_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs,
            input_source, output_source, hdfs_user)

        workflow_path = self._upload_workflow_file(oozie_server, wf_dir,
                                                   workflow_xml, hdfs_user)

        oozie_params = self._get_oozie_job_params(hdfs_user, workflow_path)

        client = self._get_client()
        oozie_job_id = client.add_job(x.create_hadoop_xml(oozie_params),
                                      job_execution)

        # Re-read the execution record: it may have been flagged for kill
        # while we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        client.run_job(job_execution, oozie_job_id)

        # Best-effort status fetch; a failure here must not fail the run.
        try:
            status = client.get_job_status(job_execution,
                                           oozie_job_id)['status']
        except Exception:
            status = None
        return (oozie_job_id, status, None)
Code example #2
0
File: engine.py  Project: msionkin/sahara
    def run_job(self, job_execution):
        """Run a Storm EDP job on the cluster's nimbus instance.

        Stages the job files into a workflow directory on the nimbus
        node, builds a ``storm jar`` command line and launches it in the
        background through a small launch script.

        Returns a (job_id, status, extra) tuple where job_id is
        "<topology_name>@<instance_id>" and extra records the workflow
        directory under 'storm-path'.  Returns (None, KILLED, None) if
        the execution was flagged to be killed before launch.

        Raises e.EDPError if the launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )

        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls.
        # NOTE(review): the runtime url dict is not referenced again in
        # this method — confirm whether it is still needed here.
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        # We'll always run the driver program on the master
        master = plugin_utils.get_instance(self.cluster, "nimbus")

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(master, '/tmp/storm-edp', job,
                                               job_execution.id, "700")

        paths = self._upload_job_files(master, wf_dir, job,
                                       updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) for p in paths]

        # paths[0] is assumed to be the topology jar
        app_jar = paths.pop(0)
        job_class = updated_job_configs["configs"]["edp.java.main_class"]
        topology_name = self._generate_topology_name(job.name)

        # Launch the storm job using storm jar
        host = master.hostname()
        # Join the user args into a single string; prefix with a space
        # only when non-empty so the command line stays well-formed.
        args = " ".join(updated_job_configs.get('args', []))
        if args:
            args = " " + args

        cmd = (
            '%(storm_jar)s -c nimbus.host=%(host)s %(job_jar)s '
            '%(main_class)s %(topology_name)s%(args)s' % (
                {
                    "storm_jar": "/usr/local/storm/bin/storm jar",
                    "main_class": job_class,
                    "job_jar": app_jar,
                    "host": host,
                    "topology_name": topology_name,
                    "args": args
                }))

        # Re-read the execution record: it may have been flagged for kill
        # while we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with remote.get_remote(master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # topology_name@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (topology_name + "@" + master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'storm-path': wf_dir})

        # No exception was raised but the launch command exited non-zero.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(_("Storm job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})
Code example #3
0
    def run_job(self, job_execution):
        """Run a Spark EDP job via spark-submit on the cluster master.

        Stages job files into a workflow directory on the master, builds a
        spark-submit command line (routed through the Sahara wrapper jar
        when builtin jars were uploaded) and launches it in the background
        via a launch script, run as the 'mapr' user.

        Returns a (job_id, status, extra) tuple where job_id is
        "<pid>@<instance_id>" and extra records the workflow directory
        under 'spark-path'.  Returns (None, KILLED, None) if the execution
        was flagged to be killed before launch.  Raises e.EDPError if the
        launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(
                job_execution.job_configs))

        # We'll always run the driver program on the master
        master = plugin_utils.get_instance(self.cluster,
                                           spark.SPARK_MASTER.ui_name)

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(master, '/tmp/spark-edp', job,
                                               job_execution.id, "700")
        paths, builtin_paths = self._upload_job_files(master, wf_dir, job,
                                                      updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        # TODO(tmckay): for now, paths[0] is always assumed to be the app
        # jar and we generate paths in order (mains, then libs).
        # When we have a Spark job type, we can require a "main" and set
        # the app jar explicitly to be "main"
        app_jar = paths.pop(0)
        job_class = updated_job_configs["configs"]["edp.java.main_class"]

        # If we uploaded builtins then we are using a wrapper jar. It will
        # be the first one on the builtin list and the original app_jar needs
        # to be added to the  'additional' jars
        if builtin_paths:
            wrapper_jar = builtin_paths.pop(0)
            wrapper_class = 'org.openstack.sahara.edp.SparkWrapper'
            wrapper_xml = self._upload_wrapper_xml(master, wf_dir,
                                                   updated_job_configs)
            wrapper_args = "%s %s" % (wrapper_xml, job_class)
            additional_jars = ",".join([app_jar] + paths + builtin_paths)
        else:
            # No wrapper: the empty strings select the plain submit
            # command below.
            wrapper_jar = wrapper_class = wrapper_args = ""
            additional_jars = ",".join(paths)

        # All additional jars are passed with the --jars option
        if additional_jars:
            additional_jars = " --jars " + additional_jars

        # Launch the spark job using spark-submit and deploy_mode = client
        cluster_context = self._get_cluster_context(self.cluster)
        spark_home_dir = spark.Spark().home_dir(cluster_context)

        # TODO(tmckay): we need to clean up wf_dirs on long running clusters
        # TODO(tmckay): probably allow for general options to spark-submit
        args = updated_job_configs.get('args', [])
        args = " ".join([su.inject_swift_url_suffix(arg) for arg in args])

        submit_args = {
            "spark_submit": "%s/bin/spark-submit" % spark_home_dir,
            "addnl_jars": additional_jars,
            "master_url": spark.SPARK_MASTER.submit_url(cluster_context),
            "args": args
        }
        if wrapper_jar and wrapper_class:
            # Substrings which may be empty have spaces
            # embedded if they are non-empty
            submit_args.update({
                "driver_cp": self.get_driver_classpath(),
                "wrapper_class": wrapper_class,
                "wrapper_jar": wrapper_jar,
                "wrapper_args": wrapper_args,
            })
            submit_cmd = ('%(spark_submit)s%(driver_cp)s'
                          ' --class %(wrapper_class)s%(addnl_jars)s'
                          ' --master %(master_url)s'
                          ' %(wrapper_jar)s %(wrapper_args)s %(args)s')
        else:
            submit_args.update({
                "job_class": job_class,
                "app_jar": app_jar,
            })
            submit_cmd = ('%(spark_submit)s --class %(job_class)s'
                          '%(addnl_jars)s --master %(master_url)s'
                          ' %(app_jar)s %(args)s')
        submit_cmd = g._run_as('mapr', submit_cmd % submit_args)

        # Re-read the execution record: it may have been flagged for kill
        # while we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with master.remote() as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s && ./launch_command %s > /dev/null 2>&1 & echo $!" %
                (wf_dir, submit_cmd),
                raise_when_error=False)

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # pid@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (stdout.strip() + "@" + master.id, edp.JOB_STATUS_RUNNING, {
                'spark-path': wf_dir
            })

        # No exception was raised but the launch command exited non-zero.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(
            _("Spark job execution failed. Exit status = "
              "%(status)s, stdout = %(stdout)s") % {
                  'status': ret,
                  'stdout': stdout
              })
Code example #4
0
File: engine.py  Project: openstacking/sahara
    def run_job(self, job_execution):
        """Run a Spark EDP job via spark-submit on self.master.

        Stages job files into a workflow directory, records resolved data
        source urls on the execution, builds a spark-submit command line
        from plugin-provided parameters plus job-specific ones, and
        launches it in the background via a launch script.

        Returns a (job_id, status, extra) tuple where job_id is
        "<pid>@<instance_id>" and extra records the workflow directory
        under 'spark-path'.  Returns (None, KILLED, None) if the execution
        was flagged to be killed before launch.  Raises e.EDPError if the
        launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)
        # Job-specific command pieces, merged with self.plugin_params later.
        indep_params = {}
        data_source_urls = {}
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(
                job_execution.job_configs, job_execution.id, data_source_urls)
        )

        job_execution = conductor.job_execution_update(
            ctx, job_execution, {"data_source_urls": data_source_urls})

        # One HDFS data source is enough to require cluster configuration.
        for data_source in additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(self.cluster, data_source)
                break

        # It is needed in case we are working with Spark plugin
        self.plugin_params['master'] = (
            self.plugin_params['master'] % {'host': self.master.hostname()})

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(self.master, '/tmp/spark-edp',
                                               job, job_execution.id, "700")
        paths, builtin_paths = self._upload_job_files(
            self.master, wf_dir, job, updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        # TODO(tmckay): for now, paths[0] is always assumed to be the app
        # jar and we generate paths in order (mains, then libs).
        # When we have a Spark job type, we can require a "main" and set
        # the app jar explicitly to be "main"
        indep_params["app_jar"] = paths.pop(0)
        indep_params["job_class"] = (
            updated_job_configs["configs"]["edp.java.main_class"])

        # If we uploaded builtins then we are using a wrapper jar. It will
        # be the first one on the builtin list and the original app_jar needs
        # to be added to the  'additional' jars
        if builtin_paths:
            indep_params["wrapper_jar"] = builtin_paths.pop(0)
            indep_params["wrapper_class"] = (
                'org.openstack.sahara.edp.SparkWrapper')
            wrapper_xml = self._upload_wrapper_xml(self.master,
                                                   wf_dir,
                                                   updated_job_configs)
            indep_params["wrapper_args"] = "%s %s" % (
                wrapper_xml, indep_params["job_class"])

            indep_params["addnl_jars"] = ",".join(
                [indep_params["app_jar"]] + paths + builtin_paths)

        else:
            indep_params["addnl_jars"] = ",".join(paths)

        # All additional jars are passed with the --jars option
        if indep_params["addnl_jars"]:
            indep_params["addnl_jars"] = (
                " --jars " + indep_params["addnl_jars"])

        # Launch the spark job using spark-submit and deploy_mode = client
        # TODO(tmckay): we need to clean up wf_dirs on long running clusters
        # TODO(tmckay): probably allow for general options to spark-submit
        indep_params["args"] = updated_job_configs.get('args', [])
        indep_params["args"] = " ".join([su.inject_swift_url_suffix(arg)
                                         for arg in indep_params["args"]])
        if indep_params.get("args"):
            indep_params["args"] = (" " + indep_params["args"])

        mutual_dict = self.plugin_params.copy()
        mutual_dict.update(indep_params)
        if mutual_dict.get("wrapper_jar"):
            # Substrings which may be empty have spaces
            # embedded if they are non-empty
            cmd = (
                '%(spark-user)s%(spark-submit)s%(driver-class-path)s'
                ' --class %(wrapper_class)s%(addnl_jars)s'
                ' --master %(master)s'
                ' --deploy-mode %(deploy-mode)s'
                ' %(wrapper_jar)s %(wrapper_args)s%(args)s') % dict(
                mutual_dict)
        else:
            cmd = (
                '%(spark-user)s%(spark-submit)s'
                ' --class %(job_class)s%(addnl_jars)s'
                ' --master %(master)s'
                ' --deploy-mode %(deploy-mode)s'
                ' %(app_jar)s%(args)s') % dict(
                mutual_dict)
        # Re-read the execution record: it may have been flagged for kill
        # while we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with remote.get_remote(self.master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod u+rwx,g+rx,o+rx %s" % wf_dir)
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # pid@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (stdout.strip() + "@" + self.master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'spark-path': wf_dir})

        # No exception was raised but the launch command exited non-zero.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(_("Spark job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})
Code example #5
0
File: engine.py  Project: AllenFromMinneapolis/sahara
    def run_job(self, job_execution):
        """Run a Spark EDP job via spark-submit on the cluster master.

        Stages job files into a workflow directory on the master, builds a
        spark-submit command line (routed through the Sahara wrapper jar
        when builtin jars were uploaded) and launches it in the background
        via a launch script.

        Returns a (job_id, status, extra) tuple where job_id is
        "<pid>@<instance_id>" and extra records the workflow directory
        under 'spark-path'.  Returns (None, KILLED, None) if the execution
        was flagged to be killed before launch.  Raises e.EDPError if the
        launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs)
        )

        # One HDFS data source is enough to require cluster configuration.
        for data_source in additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(self.cluster, data_source)
                break

        # We'll always run the driver program on the master
        master = plugin_utils.get_instance(self.cluster, "master")

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(master, '/tmp/spark-edp', job,
                                               job_execution.id, "700")

        paths, builtin_paths = self._upload_job_files(
            master, wf_dir, job, updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        # TODO(tmckay): for now, paths[0] is always assumed to be the app
        # jar and we generate paths in order (mains, then libs).
        # When we have a Spark job type, we can require a "main" and set
        # the app jar explicitly to be "main"
        app_jar = paths.pop(0)
        job_class = updated_job_configs["configs"]["edp.java.main_class"]

        # If we uploaded builtins then we are using a wrapper jar. It will
        # be the first one on the builtin list and the original app_jar needs
        # to be added to the  'additional' jars
        if builtin_paths:
            wrapper_jar = builtin_paths.pop(0)
            wrapper_class = 'org.openstack.sahara.edp.SparkWrapper'
            wrapper_xml = self._upload_wrapper_xml(master,
                                                   wf_dir,
                                                   updated_job_configs)
            wrapper_args = "%s %s" % (wrapper_xml, job_class)

            additional_jars = ",".join([app_jar] + paths + builtin_paths)

        else:
            # No wrapper: the empty strings select the plain submit
            # command below.
            wrapper_jar = wrapper_class = wrapper_args = ""
            additional_jars = ",".join(paths)

        # All additional jars are passed with the --jars option
        if additional_jars:
            additional_jars = " --jars " + additional_jars

        # Launch the spark job using spark-submit and deploy_mode = client
        host = master.hostname()
        port = c_helper.get_config_value("Spark", "Master port", self.cluster)
        spark_submit = os.path.join(
            c_helper.get_config_value("Spark",
                                      "Spark home",
                                      self.cluster),
            "bin/spark-submit")

        # TODO(tmckay): we need to clean up wf_dirs on long running clusters
        # TODO(tmckay): probably allow for general options to spark-submit
        args = updated_job_configs.get('args', [])
        args = " ".join([su.inject_swift_url_suffix(arg) for arg in args])
        if args:
            args = " " + args

        if wrapper_jar and wrapper_class:
            # Substrings which may be empty have spaces
            # embedded if they are non-empty
            cmd = (
                '%(spark_submit)s%(driver_cp)s'
                ' --class %(wrapper_class)s%(addnl_jars)s'
                ' --master spark://%(host)s:%(port)s'
                ' %(wrapper_jar)s %(wrapper_args)s%(args)s') % (
                {
                    "spark_submit": spark_submit,
                    "driver_cp": self.get_driver_classpath(),
                    "wrapper_class": wrapper_class,
                    "addnl_jars": additional_jars,
                    "host": host,
                    "port": port,
                    "wrapper_jar": wrapper_jar,
                    "wrapper_args": wrapper_args,
                    "args": args
                })
        else:
            cmd = (
                '%(spark_submit)s --class %(job_class)s%(addnl_jars)s'
                ' --master spark://%(host)s:%(port)s %(app_jar)s%(args)s') % (
                {
                    "spark_submit": spark_submit,
                    "job_class": job_class,
                    "addnl_jars": additional_jars,
                    "host": host,
                    "port": port,
                    "app_jar": app_jar,
                    "args": args
                })

        # Re-read the execution record: it may have been flagged for kill
        # while we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with remote.get_remote(master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # pid@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (stdout.strip() + "@" + master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'spark-path': wf_dir})

        # No exception was raised but the launch command exited non-zero.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(_("Spark job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})
Code example #6
0
    def run_job(self, job_execution):
        """Run a Spark EDP job via spark-submit on self.master.

        Stages job files into a workflow directory, records resolved data
        source urls on the execution, builds a spark-submit command line
        from plugin-provided parameters plus job-specific ones (passing
        the wrapper xml with --files when the wrapper jar is used), and
        launches it in the background via a launch script.

        Returns a (job_id, status, extra) tuple where job_id is
        "<pid>@<instance_id>" and extra records the workflow directory
        under 'spark-path'.  Returns (None, KILLED, None) if the execution
        was flagged to be killed before launch.  Raises e.EDPError if the
        launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)
        # Job-specific command pieces, merged with self.plugin_params later.
        indep_params = {}
        data_source_urls = {}
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls))

        job_execution = conductor.job_execution_update(
            ctx, job_execution, {"data_source_urls": data_source_urls})

        # One HDFS data source is enough to require cluster configuration.
        for data_source in additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(self.cluster, data_source)
                break

        # It is needed in case we are working with Spark plugin
        self.plugin_params['master'] = (self.plugin_params['master'] % {
            'host': self.master.hostname()
        })

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(self.master, '/tmp/spark-edp',
                                               job, job_execution.id, "700")
        paths, builtin_paths = self._upload_job_files(self.master, wf_dir, job,
                                                      updated_job_configs)

        # We can shorten the paths in this case since we'll run out of wf_dir
        paths = [os.path.basename(p) for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        # TODO(tmckay): for now, paths[0] is always assumed to be the app
        # jar and we generate paths in order (mains, then libs).
        # When we have a Spark job type, we can require a "main" and set
        # the app jar explicitly to be "main"
        indep_params["app_jar"] = paths.pop(0)
        indep_params["job_class"] = (
            updated_job_configs["configs"]["edp.java.main_class"])

        # If we uploaded builtins then we are using a wrapper jar. It will
        # be the first one on the builtin list and the original app_jar needs
        # to be added to the  'additional' jars
        if builtin_paths:
            indep_params["wrapper_jar"] = builtin_paths.pop(0)
            indep_params["wrapper_class"] = (
                'org.openstack.sahara.edp.SparkWrapper')
            wrapper_xml = self._upload_wrapper_xml(self.master, wf_dir,
                                                   updated_job_configs)
            indep_params["wrapper_args"] = "%s %s" % (
                wrapper_xml, indep_params["job_class"])

            # The wrapper xml is shipped to executors with --files below.
            indep_params["addnl_files"] = wrapper_xml

            indep_params["addnl_jars"] = ",".join(
                [indep_params["wrapper_jar"]] + paths + builtin_paths)

        else:
            indep_params["addnl_jars"] = ",".join(paths)

        # All additional jars are passed with the --jars option
        if indep_params["addnl_jars"]:
            indep_params["addnl_jars"] = (" --jars " +
                                          indep_params["addnl_jars"])

        # Launch the spark job using spark-submit and deploy_mode = client
        # TODO(tmckay): we need to clean up wf_dirs on long running clusters
        # TODO(tmckay): probably allow for general options to spark-submit
        indep_params["args"] = updated_job_configs.get('args', [])
        indep_params["args"] = " ".join(
            [su.inject_swift_url_suffix(arg) for arg in indep_params["args"]])
        if indep_params.get("args"):
            indep_params["args"] = (" " + indep_params["args"])

        mutual_dict = self.plugin_params.copy()
        mutual_dict.update(indep_params)
        if mutual_dict.get("wrapper_jar"):
            # Substrings which may be empty have spaces
            # embedded if they are non-empty
            cmd = ('%(spark-user)s%(spark-submit)s%(driver-class-path)s'
                   ' --files %(addnl_files)s'
                   ' --class %(wrapper_class)s%(addnl_jars)s'
                   ' --master %(master)s'
                   ' --deploy-mode %(deploy-mode)s'
                   ' %(app_jar)s %(wrapper_args)s%(args)s') % dict(mutual_dict)
        else:
            cmd = ('%(spark-user)s%(spark-submit)s'
                   ' --class %(job_class)s%(addnl_jars)s'
                   ' --master %(master)s'
                   ' --deploy-mode %(deploy-mode)s'
                   ' %(app_jar)s%(args)s') % dict(mutual_dict)
        # Re-read the execution record: it may have been flagged for kill
        # while we were staging files.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # If an exception is raised here, the job_manager will mark
        # the job failed and log the exception
        # The redirects of stdout and stderr will preserve output in the wf_dir
        with remote.get_remote(self.master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod u+rwx,g+rx,o+rx %s" % wf_dir)
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!" %
                (wf_dir, cmd))

        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # pid@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (stdout.strip() + "@" + self.master.id,
                    edp.JOB_STATUS_RUNNING, {
                        'spark-path': wf_dir
                    })

        # No exception was raised but the launch command exited non-zero.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(
            _("Spark job execution failed. Exit status = "
              "%(status)s, stdout = %(stdout)s") % {
                  'status': ret,
                  'stdout': stdout
              })
Code example #7
0
File: test_job_utils.py  Project: yonglehou/sahara
    def test_resolve_data_source_refs(self, data_source_get_all, ctx):
        """Verify that name/uuid data source references become URLs.

        Exercises job_utils.resolve_data_source_references() with the
        name/uuid substitution flags toggled in every combination,
        using a local fake (_get_all) in place of the real database.
        """
        # The mocked context.ctx() return value; its content is irrelevant.
        ctx.return_value = 'dummy'

        name_ref = job_utils.DATA_SOURCE_PREFIX + 'input'
        job_exec_id = six.text_type(uuid.uuid4())

        input_url = "swift://container/input"
        input = u.create_data_source(input_url,
                                     name="input",
                                     id=six.text_type(uuid.uuid4()))

        # The %JOB_EXEC_ID% placeholder in the output url is expected to
        # be replaced with the job execution id during resolution
        # (asserted against output_url below).
        output = u.create_data_source("swift://container/output.%JOB_EXEC_ID%",
                                      name="output",
                                      id=six.text_type(uuid.uuid4()))
        output_url = "swift://container/output." + job_exec_id

        by_name = {'input': input, 'output': output}

        by_id = {input.id: input, output.id: output}

        # Pretend to be the database
        def _get_all(ctx, **kwargs):
            name = kwargs.get('name')
            if name in by_name:
                name_list = [by_name[name]]
            else:
                name_list = []

            id = kwargs.get('id')
            if id in by_id:
                id_list = [by_id[id]]
            else:
                id_list = []
            return list(set(name_list + id_list))

        data_source_get_all.side_effect = _get_all

        # Both name and uuid substitution enabled: all three args
        # should resolve to urls.
        job_configs = {
            'configs': {
                job_utils.DATA_SOURCE_SUBST_NAME: True,
                job_utils.DATA_SOURCE_SUBST_UUID: True
            },
            'args': [name_ref, output.id, input.id]
        }
        urls = {}
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, urls)
        self.assertEqual(2, len(ds))
        self.assertEqual([input.url, output_url, input.url], nc['args'])
        # Swift configs should be filled in since they were blank
        self.assertEqual(input.credentials['user'],
                         nc['configs']['fs.swift.service.sahara.username'])
        self.assertEqual(input.credentials['password'],
                         nc['configs']['fs.swift.service.sahara.password'])
        self.assertEqual(2, len(urls))
        self.assertItemsEqual(
            {
                input.id: (input_url, input_url),
                output.id: (output_url, output_url)
            }, urls)

        # Only uuid substitution enabled: the name reference stays as-is.
        job_configs['configs'] = {
            'fs.swift.service.sahara.username': '******',
            'fs.swift.service.sahara.password': '******',
            job_utils.DATA_SOURCE_SUBST_NAME: False,
            job_utils.DATA_SOURCE_SUBST_UUID: True
        }
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, {})
        self.assertEqual(2, len(ds))
        self.assertEqual([name_ref, output_url, input.url], nc['args'])
        # Swift configs should not be overwritten
        self.assertEqual(job_configs['configs'], nc['configs'])

        # Only name substitution enabled: uuid references stay as-is.
        job_configs['configs'] = {
            job_utils.DATA_SOURCE_SUBST_NAME: True,
            job_utils.DATA_SOURCE_SUBST_UUID: False
        }
        job_configs['proxy_configs'] = {
            'proxy_username': '******',
            'proxy_password': '******',
            'proxy_trust_id': 'trustme'
        }
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, {})
        self.assertEqual(1, len(ds))
        self.assertEqual([input.url, output.id, input.id], nc['args'])

        # Swift configs should be empty and proxy configs should be preserved
        self.assertEqual(job_configs['configs'], nc['configs'])
        self.assertEqual(job_configs['proxy_configs'], nc['proxy_configs'])

        # Substitution not enabled
        job_configs['configs'] = {
            job_utils.DATA_SOURCE_SUBST_NAME: False,
            job_utils.DATA_SOURCE_SUBST_UUID: False
        }
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, {})
        self.assertEqual(0, len(ds))
        self.assertEqual(job_configs['args'], nc['args'])
        self.assertEqual(job_configs['configs'], nc['configs'])

        # Substitution enabled but no values to modify
        job_configs['configs'] = {
            job_utils.DATA_SOURCE_SUBST_NAME: True,
            job_utils.DATA_SOURCE_SUBST_UUID: True
        }
        job_configs['args'] = ['val1', 'val2', 'val3']
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, {})
        self.assertEqual(0, len(ds))
        self.assertEqual(nc['args'], job_configs['args'])
        self.assertEqual(nc['configs'], job_configs['configs'])
コード例 #8
0
ファイル: engine.py プロジェクト: madar010/mad
    def run_job(self, job_execution):
        """Prepare and launch a Storm topology for a job execution.

        Returns a (job_id, status, extra) tuple where job_id is
        "<topology_name>@<master instance id>" on success, or
        (None, KILLED, None) if the execution was marked to be killed
        before launch.

        :raises e.EDPError: if the remote launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster))

        # Record the native urls on the job execution before switching
        # to the runtime urls below.
        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        job_utils.prepare_cluster_for_ds(additional_sources, self.cluster,
                                         updated_job_configs, data_source_urls)

        # We'll always run the driver program on the master
        master = plugin_utils.get_instance(self.cluster, "nimbus")

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(master, '/tmp/storm-edp', job,
                                               job_execution.id, "700")

        paths = self._upload_job_files(master, wf_dir, job,
                                       updated_job_configs)

        topology_name = self._set_topology_name(job_execution, job.name)

        # Launch the storm job using storm jar
        host = master.hostname()
        cmd = self._build_command(paths, updated_job_configs, host,
                                  topology_name)

        # Re-read the execution: bail out if a kill was requested while
        # we were preparing the workflow.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        ret, stdout = self._execute_remote_job(master, wf_dir, cmd)
        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # topology_name@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (topology_name + "@" + master.id, edp.JOB_STATUS_RUNNING, {
                'storm-path': wf_dir
            })

        # Hmm, no exception but something failed.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(
            _("Storm job execution failed. Exit status = "
              "%(status)s, stdout = %(stdout)s") % {
                  'status': ret,
                  'stdout': stdout
              })
コード例 #9
0
ファイル: engine.py プロジェクト: uladz/sahara
    def run_job(self, job_execution):
        """Build, upload and submit an Oozie workflow for a job execution.

        Returns a (oozie_job_id, status, extra) tuple; status is None when
        the Oozie client cannot report it, and (None, KILLED, None) is
        returned if the execution was marked to be killed before submission.
        """
        ctx = context.ctx()

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_data_sources(
            job_execution, job, data_source_urls, self.cluster)

        # Updated_job_configs will be a copy of job_execution.job_configs with
        # any name or uuid references to data_sources resolved to paths
        # assuming substitution is enabled.
        # If substitution is not enabled then updated_job_configs will
        # just be a reference to job_execution.job_configs to avoid a copy.
        # Additional_sources will be a list of any data_sources found.
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )

        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        proxy_configs = updated_job_configs.get('proxy_configs')
        configs = updated_job_configs.get('configs', {})
        use_hbase_lib = configs.get('edp.hbase_common_lib', {})

        # Extract all the 'oozie.' configs so that they can be set in the
        # job properties file. These are config values for Oozie itself,
        # not the job code
        oozie_params = {}
        for k in list(configs):
            if k.startswith('oozie.'):
                oozie_params[k] = configs[k]

        # Only the first hdfs data source triggers cluster configuration;
        # hence the break after the first match.
        for data_source in [input_source, output_source] + additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(
                    self.cluster, data_source_urls[data_source.id])
                break

        external_hdfs_urls = self._resolve_external_hdfs_urls(
            job_execution.job_configs)
        for url in external_hdfs_urls:
            h.configure_cluster_for_hdfs(self.cluster, url)

        hdfs_user = self.get_hdfs_user()

        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)

        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
                                       proxy_configs)

        wf_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs,
            input_source, output_source,
            hdfs_user, data_source_urls)

        path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
                                                      wf_xml, hdfs_user)

        job_params = self._get_oozie_job_params(hdfs_user,
                                                path_to_workflow,
                                                oozie_params,
                                                use_hbase_lib)

        client = self._get_client()
        oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
                                      job_execution)
        # Re-read the execution: bail out if a kill was requested while
        # we were preparing the workflow.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        conductor.job_execution_update(
            context.ctx(), job_execution.id,
            {'info': {'status': edp.JOB_STATUS_READYTORUN},
             'engine_job_id': oozie_job_id})

        client.run_job(job_execution, oozie_job_id)
        try:
            status = client.get_job_info(job_execution, oozie_job_id)['status']
        except Exception:
            # Status reporting is best-effort; submission already succeeded.
            status = None
        return (oozie_job_id, status, None)
コード例 #10
0
    def _prepare_run_job(self, job_execution):
        """Do the workflow preparation for an Oozie job execution.

        Resolves data source references, configures the cluster for any
        hdfs sources, builds and uploads the workflow xml, and returns a
        dict of parameters (context, hdfs_user, path_to_workflow,
        use_hbase_lib, job_execution, oozie_params, wf_dir, oozie_server)
        for the caller to use when submitting the job.
        """
        ctx = context.ctx()

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        prepared_job_params = {}

        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_data_sources(
            job_execution, job, data_source_urls, self.cluster)

        # Updated_job_configs will be a copy of job_execution.job_configs with
        # any name or uuid references to data_sources resolved to paths
        # assuming substitution is enabled.
        # If substitution is not enabled then updated_job_configs will
        # just be a reference to job_execution.job_configs to avoid a copy.
        # Additional_sources will be a list of any data_sources found.
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )

        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        proxy_configs = updated_job_configs.get('proxy_configs')
        configs = updated_job_configs.get('configs', {})
        use_hbase_lib = configs.get('edp.hbase_common_lib', {})

        # Extract all the 'oozie.' configs so that they can be set in the
        # job properties file. These are config values for Oozie itself,
        # not the job code
        oozie_params = {}
        for k in list(configs):
            if k.startswith('oozie.'):
                oozie_params[k] = configs[k]

        # Only the first hdfs data source triggers cluster configuration;
        # hence the break after the first match.
        for data_source in [input_source, output_source] + additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(
                    self.cluster, data_source_urls[data_source.id])
                break

        external_hdfs_urls = self._resolve_external_hdfs_urls(
            job_execution.job_configs)
        for url in external_hdfs_urls:
            h.configure_cluster_for_hdfs(self.cluster, url)

        hdfs_user = self.get_hdfs_user()

        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)

        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
                                       proxy_configs)

        wf_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs,
            input_source, output_source,
            hdfs_user, data_source_urls)

        path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
                                                      wf_xml, hdfs_user)

        prepared_job_params['context'] = ctx
        prepared_job_params['hdfs_user'] = hdfs_user
        prepared_job_params['path_to_workflow'] = path_to_workflow
        prepared_job_params['use_hbase_lib'] = use_hbase_lib
        prepared_job_params['job_execution'] = job_execution
        prepared_job_params['oozie_params'] = oozie_params
        prepared_job_params['wf_dir'] = wf_dir
        prepared_job_params['oozie_server'] = oozie_server

        return prepared_job_params
コード例 #11
0
ファイル: engine.py プロジェクト: rogeryu27/sahara
    def run_job(self, job_execution):
        """Build, upload and submit an Oozie workflow for a job execution.

        Returns a (oozie_job_id, status, extra) tuple; status is None when
        the Oozie client cannot report it, and (None, KILLED, None) is
        returned if the execution was marked to be killed before submission.
        """
        ctx = context.ctx()

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_data_sources(
            job_execution, job, data_source_urls, self.cluster)

        # Updated_job_configs will be a copy of job_execution.job_configs with
        # any name or uuid references to data_sources resolved to paths
        # assuming substitution is enabled.
        # If substitution is not enabled then updated_job_configs will
        # just be a reference to job_execution.job_configs to avoid a copy.
        # Additional_sources will be a list of any data_sources found.
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )

        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        proxy_configs = updated_job_configs.get('proxy_configs')
        configs = updated_job_configs.get('configs', {})
        use_hbase_lib = configs.get('edp.hbase_common_lib', {})

        # Extract all the 'oozie.' configs so that they can be set in the
        # job properties file. These are config values for Oozie itself,
        # not the job code
        oozie_params = {}
        for k in list(configs):
            if k.startswith('oozie.'):
                oozie_params[k] = configs[k]

        # Only the first hdfs data source triggers cluster configuration;
        # hence the break after the first match.
        for data_source in [input_source, output_source] + additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(
                    self.cluster, data_source_urls[data_source.id])
                break

        external_hdfs_urls = self._resolve_external_hdfs_urls(
            job_execution.job_configs)
        for url in external_hdfs_urls:
            h.configure_cluster_for_hdfs(self.cluster, url)

        hdfs_user = self.get_hdfs_user()

        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)

        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
                                       proxy_configs)

        wf_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs,
            input_source, output_source,
            hdfs_user, data_source_urls)

        path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
                                                      wf_xml, hdfs_user)

        job_params = self._get_oozie_job_params(hdfs_user,
                                                path_to_workflow,
                                                oozie_params,
                                                use_hbase_lib)

        client = self._get_client()
        oozie_job_id = client.add_job(x.create_hadoop_xml(job_params),
                                      job_execution)
        # Re-read the execution: bail out if a kill was requested while
        # we were preparing the workflow.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        conductor.job_execution_update(
            context.ctx(), job_execution.id,
            {'info': {'status': edp.JOB_STATUS_READYTORUN},
             'engine_job_id': oozie_job_id})

        client.run_job(job_execution, oozie_job_id)
        try:
            status = client.get_job_status(job_execution,
                                           oozie_job_id)['status']
        except Exception:
            # Status reporting is best-effort; submission already succeeded.
            status = None
        return (oozie_job_id, status, None)
コード例 #12
0
ファイル: engine.py プロジェクト: Imperat/sahara
    def _prepare_run_job(self, job_execution):
        """Do the workflow preparation for an Oozie job execution.

        Resolves data source references, configures the cluster for any
        hdfs sources, builds and uploads the workflow xml, and returns a
        dict of parameters (context, hdfs_user, path_to_workflow,
        use_hbase_lib, job_execution, oozie_params, wf_dir, oozie_server)
        for the caller to use when submitting the job.
        """
        ctx = context.ctx()

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        prepared_job_params = {}

        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_data_sources(
            job_execution, job, data_source_urls, self.cluster)

        # Updated_job_configs will be a copy of job_execution.job_configs with
        # any name or uuid references to data_sources resolved to paths
        # assuming substitution is enabled.
        # If substitution is not enabled then updated_job_configs will
        # just be a reference to job_execution.job_configs to avoid a copy.
        # Additional_sources will be a list of any data_sources found.
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )

        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        proxy_configs = updated_job_configs.get('proxy_configs')
        configs = updated_job_configs.get('configs', {})
        use_hbase_lib = configs.get('edp.hbase_common_lib', {})

        # Extract all the 'oozie.' configs so that they can be set in the
        # job properties file. These are config values for Oozie itself,
        # not the job code
        oozie_params = {}
        for k in list(configs):
            if k.startswith('oozie.'):
                oozie_params[k] = configs[k]

        # Only the first hdfs data source triggers cluster configuration;
        # hence the break after the first match.
        for data_source in [input_source, output_source] + additional_sources:
            if data_source and data_source.type == 'hdfs':
                h.configure_cluster_for_hdfs(
                    self.cluster, data_source_urls[data_source.id])
                break

        external_hdfs_urls = self._resolve_external_hdfs_urls(
            job_execution.job_configs)
        for url in external_hdfs_urls:
            h.configure_cluster_for_hdfs(self.cluster, url)

        hdfs_user = self.get_hdfs_user()

        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)

        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs,
                                       proxy_configs)

        wf_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs,
            input_source, output_source,
            hdfs_user, data_source_urls)

        path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir,
                                                      wf_xml, hdfs_user)

        prepared_job_params['context'] = ctx
        prepared_job_params['hdfs_user'] = hdfs_user
        prepared_job_params['path_to_workflow'] = path_to_workflow
        prepared_job_params['use_hbase_lib'] = use_hbase_lib
        prepared_job_params['job_execution'] = job_execution
        prepared_job_params['oozie_params'] = oozie_params
        prepared_job_params['wf_dir'] = wf_dir
        prepared_job_params['oozie_server'] = oozie_server

        return prepared_job_params
コード例 #13
0
    def test_resolve_data_source_refs(self, data_source_get_all, ctx):
        """Verify that name/uuid data source references become URLs.

        Exercises job_utils.resolve_data_source_references() with the
        name/uuid substitution flags toggled in every combination,
        using a local fake (_get_all) in place of the real database.
        """
        # The mocked context.ctx() return value; its content is irrelevant.
        ctx.return_value = 'dummy'

        name_ref = job_utils.DATA_SOURCE_PREFIX+'input'

        input = u.create_data_source("swift://container/input",
                                     name="input",
                                     id=six.text_type(uuid.uuid4()))

        output = u.create_data_source("swift://container/output",
                                      name="output",
                                      id=six.text_type(uuid.uuid4()))

        by_name = {'input': input,
                   'output': output}

        by_id = {input.id: input,
                 output.id: output}

        # Pretend to be the database
        def _get_all(ctx, **kwargs):
            name = kwargs.get('name')
            if name in by_name:
                name_list = [by_name[name]]
            else:
                name_list = []

            id = kwargs.get('id')
            if id in by_id:
                id_list = [by_id[id]]
            else:
                id_list = []
            return list(set(name_list + id_list))

        data_source_get_all.side_effect = _get_all

        # Both name and uuid substitution enabled: all three args
        # should resolve to urls.
        job_configs = {
            'configs': {
                job_utils.DATA_SOURCE_SUBST_NAME: True,
                job_utils.DATA_SOURCE_SUBST_UUID: True},
            'args': [name_ref, output.id, input.id]}

        ds, nc = job_utils.resolve_data_source_references(job_configs)
        self.assertEqual(2, len(ds))
        self.assertEqual([input.url, output.url, input.url], nc['args'])
        # Swift configs should be filled in since they were blank
        self.assertEqual(input.credentials['user'],
                         nc['configs']['fs.swift.service.sahara.username'])
        self.assertEqual(input.credentials['password'],
                         nc['configs']['fs.swift.service.sahara.password'])

        # Only uuid substitution enabled: the name reference stays as-is.
        job_configs['configs'] = {'fs.swift.service.sahara.username': '******',
                                  'fs.swift.service.sahara.password': '******',
                                  job_utils.DATA_SOURCE_SUBST_NAME: False,
                                  job_utils.DATA_SOURCE_SUBST_UUID: True}
        ds, nc = job_utils.resolve_data_source_references(job_configs)
        self.assertEqual(2, len(ds))
        self.assertEqual([name_ref, output.url, input.url], nc['args'])
        # Swift configs should not be overwritten
        self.assertEqual(job_configs['configs'], nc['configs'])

        # Only name substitution enabled: uuid references stay as-is.
        job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: True,
                                  job_utils.DATA_SOURCE_SUBST_UUID: False}
        job_configs['proxy_configs'] = {'proxy_username': '******',
                                        'proxy_password': '******',
                                        'proxy_trust_id': 'trustme'}
        ds, nc = job_utils.resolve_data_source_references(job_configs)
        self.assertEqual(1, len(ds))
        self.assertEqual([input.url, output.id, input.id], nc['args'])

        # Swift configs should be empty and proxy configs should be preserved
        self.assertEqual(job_configs['configs'], nc['configs'])
        self.assertEqual(job_configs['proxy_configs'], nc['proxy_configs'])

        # Substitution not enabled
        job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: False,
                                  job_utils.DATA_SOURCE_SUBST_UUID: False}
        ds, nc = job_utils.resolve_data_source_references(job_configs)
        self.assertEqual(0, len(ds))
        self.assertEqual(job_configs['args'], nc['args'])
        self.assertEqual(job_configs['configs'], nc['configs'])

        # Substitution enabled but no values to modify
        job_configs['configs'] = {job_utils.DATA_SOURCE_SUBST_NAME: True,
                                  job_utils.DATA_SOURCE_SUBST_UUID: True}
        job_configs['args'] = ['val1', 'val2', 'val3']
        ds, nc = job_utils.resolve_data_source_references(job_configs)
        self.assertEqual(0, len(ds))
        self.assertEqual(job_configs['args'], nc['args'])
        self.assertEqual(job_configs['configs'], nc['configs'])
コード例 #14
0
ファイル: engine.py プロジェクト: openstack/sahara
    def run_job(self, job_execution):
        """Prepare and launch a Storm topology for a job execution.

        Returns a (job_id, status, extra) tuple where job_id is
        "<topology_name>@<master instance id>" on success, or
        (None, KILLED, None) if the execution was marked to be killed
        before launch.

        :raises e.EDPError: if the remote launch command exits non-zero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        # This will be a dictionary of tuples, (native_url, runtime_url)
        # keyed by data_source id
        data_source_urls = {}

        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(job_execution.job_configs,
                                                     job_execution.id,
                                                     data_source_urls,
                                                     self.cluster)
        )

        # Record the native urls on the job execution before switching
        # to the runtime urls below.
        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # Now that we've recorded the native urls, we can switch to the
        # runtime urls
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        job_utils.prepare_cluster_for_ds(additional_sources,
                                         self.cluster, updated_job_configs,
                                         data_source_urls)

        # We'll always run the driver program on the master
        master = plugin_utils.get_instance(self.cluster, "nimbus")

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(master, '/tmp/storm-edp', job,
                                               job_execution.id, "700")

        paths = self._upload_job_files(master, wf_dir, job,
                                       updated_job_configs)

        topology_name = self._set_topology_name(job_execution, job.name)

        # Launch the storm job using storm jar
        host = master.hostname()
        cmd = self._build_command(paths, updated_job_configs, host,
                                  topology_name)

        # Re-read the execution: bail out if a kill was requested while
        # we were preparing the workflow.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        ret, stdout = self._execute_remote_job(master, wf_dir, cmd)
        if ret == 0:
            # Success, we'll add the wf_dir in job_execution.extra and store
            # topology_name@instance_id as the job id
            # We know the job is running so return "RUNNING"
            return (topology_name + "@" + master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'storm-path': wf_dir})

        # Hmm, no exception but something failed.
        # Since we're using backgrounding with redirect, this is unlikely.
        raise e.EDPError(_("Storm job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})
コード例 #15
0
ファイル: engine.py プロジェクト: snowind/sahara
    def run_job(self, job_execution):
        """Build, upload and submit an Oozie workflow for a job execution.

        Returns a (oozie_job_id, status, extra) tuple; status is None when
        the Oozie client cannot report it, and (None, KILLED, None) is
        returned if the execution was marked to be killed before launch.
        """
        ctx = context.ctx()

        # Dictionary of data source urls keyed by data_source id, filled
        # in by get_data_sources/resolve_data_source_references.
        data_source_urls = {}

        job = conductor.job_get(ctx, job_execution.job_id)
        input_source, output_source = job_utils.get_data_sources(job_execution, job, data_source_urls)

        # Updated_job_configs will be a copy of job_execution.job_configs with
        # any name or uuid references to data_sources resolved to paths
        # assuming substitution is enabled.
        # If substitution is not enabled then updated_job_configs will
        # just be a reference to job_execution.job_configs to avoid a copy.
        # Additional_sources will be a list of any data_sources found.
        additional_sources, updated_job_configs = job_utils.resolve_data_source_references(
            job_execution.job_configs, job_execution.id, data_source_urls
        )

        # Record the resolved urls on the job execution.
        job_execution = conductor.job_execution_update(ctx, job_execution, {"data_source_urls": data_source_urls})

        proxy_configs = updated_job_configs.get("proxy_configs")
        configs = updated_job_configs.get("configs", {})
        use_hbase_lib = configs.get("edp.hbase_common_lib", {})

        # Extract all the 'oozie.' configs so that they can be set in the
        # job properties file. These are config values for Oozie itself,
        # not the job code
        oozie_params = {}
        for k in list(configs):
            if k.startswith("oozie."):
                oozie_params[k] = configs[k]

        # Only the first hdfs data source triggers cluster configuration;
        # hence the break after the first match.
        for data_source in [input_source, output_source] + additional_sources:
            if data_source and data_source.type == "hdfs":
                h.configure_cluster_for_hdfs(self.cluster, data_source_urls[data_source.id])
                break

        hdfs_user = self.get_hdfs_user()

        # TODO(tmckay): this should probably be "get_namenode"
        # but that call does not exist in the oozie engine api now.
        oozie_server = self.get_oozie_server(self.cluster)

        wf_dir = self._create_hdfs_workflow_dir(oozie_server, job)
        self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs, proxy_configs)

        wf_xml = workflow_factory.get_workflow_xml(
            job, self.cluster, updated_job_configs, input_source, output_source, hdfs_user, data_source_urls
        )

        path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir, wf_xml, hdfs_user)

        job_params = self._get_oozie_job_params(hdfs_user, path_to_workflow, oozie_params, use_hbase_lib)

        client = self._get_client()
        oozie_job_id = client.add_job(x.create_hadoop_xml(job_params), job_execution)
        # Re-read the execution: bail out if a kill was requested while
        # we were preparing the workflow.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info["status"] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)
        client.run_job(job_execution, oozie_job_id)
        try:
            status = client.get_job_status(job_execution, oozie_job_id)["status"]
        except Exception:
            # Status reporting is best-effort; submission already succeeded.
            status = None
        return (oozie_job_id, status, None)
コード例 #16
0
ファイル: engine.py プロジェクト: zhangjunli177/sahara
    def run_job(self, job_execution):
        """Launch a Spark EDP job on the master node.

        Returns a tuple (job_id, status, extra): on success the job id is
        "<pid>@<master instance id>", the status is RUNNING and extra
        records the workflow directory. Raises EDPError when the launch
        command itself reports a nonzero exit status.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        # Dictionary of (native_url, runtime_url) tuples keyed by
        # data_source id, filled in during reference resolution.
        data_source_urls = {}
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(
                job_execution.job_configs, job_execution.id,
                data_source_urls, self.cluster))

        # Record the native urls on the job execution first ...
        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # ... then switch to the runtime urls for the rest of the run.
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        # A single HDFS data source is enough to trigger cluster
        # configuration for HDFS.
        hdfs_source = next((ds for ds in additional_sources
                            if ds and ds.type == 'hdfs'), None)
        if hdfs_source is not None:
            h.configure_cluster_for_hdfs(self.cluster, hdfs_source)

        # Needed when we are working with the Spark plugin.
        self.plugin_params['master'] = (
            self.plugin_params['master'] % {'host': self.master.hostname()})

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(self.master, '/tmp/spark-edp',
                                               job, job_execution.id, "700")
        paths, builtin_paths = self._upload_job_files(
            self.master, wf_dir, job, updated_job_configs)

        # The launch command runs from wf_dir, so anything underneath it
        # can be referenced by basename alone.
        shortened = []
        for path in paths:
            shortened.append(os.path.basename(path)
                             if path.startswith(wf_dir) else path)
        paths = shortened
        builtin_paths = [os.path.basename(bp) for bp in builtin_paths]

        cmd = self._build_command(wf_dir, paths, builtin_paths,
                                  updated_job_configs)

        # Re-read the execution record; honor a kill requested while the
        # job was being prepared.
        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            return (None, edp.JOB_STATUS_KILLED, None)

        # Any exception raised here is caught by the job_manager, which
        # marks the job failed and logs it. Redirecting stdout and stderr
        # preserves the output under wf_dir.
        with remote.get_remote(self.master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod u+rwx,g+rx,o+rx %s" % wf_dir)
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret != 0:
            # No exception, but the launch itself reported failure. With
            # backgrounding and redirection this is unlikely.
            raise e.EDPError(_("Spark job execution failed. Exit status = "
                               "%(status)s, stdout = %(stdout)s") %
                             {'status': ret, 'stdout': stdout})

        # Success: store pid@instance_id as the job id and the wf_dir in
        # job_execution.extra. The job is running, so report RUNNING.
        return (stdout.strip() + "@" + self.master.id,
                edp.JOB_STATUS_RUNNING,
                {'spark-path': wf_dir})
コード例 #17
0
ファイル: engine.py プロジェクト: lwlzpn84/sahara
    def run_job(self, job_execution):
        """Launch a Spark EDP job on the master node.

        Resolves data source references, prepares the cluster for the
        discovered data sources, uploads the job files and a launch script,
        and backgrounds the launch command. Returns (job_id, status, extra)
        where a successful job id is "<pid>@<master instance id>"; raises
        EDPError when the launch command exits nonzero.
        """
        ctx = context.ctx()
        job = conductor.job_get(ctx, job_execution.job_id)

        # (native_url, runtime_url) tuples keyed by data_source id, filled
        # in during reference resolution.
        data_source_urls = {}
        additional_sources, updated_job_configs = (
            job_utils.resolve_data_source_references(
                job_execution.job_configs, job_execution.id,
                data_source_urls, self.cluster))

        # Persist the native urls first ...
        job_execution = conductor.job_execution_update(
            ctx, job_execution,
            {"data_source_urls": job_utils.to_url_dict(data_source_urls)})

        # ... then work with the runtime urls from here on.
        data_source_urls = job_utils.to_url_dict(data_source_urls,
                                                 runtime=True)

        # Let the cluster be prepared for whatever data sources were found.
        job_utils.prepare_cluster_for_ds(additional_sources,
                                         self.cluster, updated_job_configs,
                                         data_source_urls)

        # Needed when we are working with the Spark plugin.
        self.plugin_params['master'] = (
            self.plugin_params['master'] % {'host': self.master.hostname()})

        # TODO(tmckay): wf_dir should probably be configurable.
        # The only requirement is that the dir is writable by the image user
        wf_dir = job_utils.create_workflow_dir(self.master, '/tmp/spark-edp',
                                               job, job_execution.id, "700")
        paths, builtin_paths = self._upload_job_files(
            self.master, wf_dir, job, updated_job_configs)

        # Files under wf_dir can be addressed by basename since the launch
        # command runs from wf_dir.
        def _shorten(path):
            return (os.path.basename(path)
                    if path.startswith(wf_dir) else path)

        paths = [_shorten(p) for p in paths]
        builtin_paths = [os.path.basename(p) for p in builtin_paths]

        cmd = self._build_command(wf_dir, paths, builtin_paths,
                                  updated_job_configs)

        job_execution = conductor.job_execution_get(ctx, job_execution.id)
        if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED:
            # A kill arrived while we were preparing; do not launch.
            return (None, edp.JOB_STATUS_KILLED, None)

        # If anything raises here, the job_manager marks the job failed and
        # logs the exception. stdout and stderr are redirected so the
        # output is preserved in wf_dir.
        with remote.get_remote(self.master) as r:
            # Upload the command launch script
            launch = os.path.join(wf_dir, "launch_command")
            r.write_file_to(launch, self._job_script())
            r.execute_command("chmod u+rwx,g+rx,o+rx %s" % wf_dir)
            r.execute_command("chmod +x %s" % launch)
            ret, stdout = r.execute_command(
                "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
                % (wf_dir, cmd))

        if ret == 0:
            # The job is running; record pid@instance_id as the job id and
            # stash the workflow dir in job_execution.extra.
            return (stdout.strip() + "@" + self.master.id,
                    edp.JOB_STATUS_RUNNING,
                    {'spark-path': wf_dir})

        # No exception but a nonzero exit status; with backgrounding and
        # redirection this is unlikely.
        raise e.EDPError(_("Spark job execution failed. Exit status = "
                           "%(status)s, stdout = %(stdout)s") %
                         {'status': ret, 'stdout': stdout})
コード例 #18
0
    def test_resolve_data_source_refs(self, data_source_get_all, ctx):
        """Verify data source reference substitution in job configs.

        Covers three cases: substitution by name and by uuid (including the
        %JOB_EXEC_ID% placeholder in the output url), substitution disabled,
        and substitution enabled with no matching argument values.
        """
        ctx.return_value = 'dummy'

        name_ref = job_utils.DATA_SOURCE_PREFIX + 'input'
        job_exec_id = uuidutils.generate_uuid()

        # Locals renamed from 'input'/'id' so they no longer shadow the
        # Python builtins of the same names.
        input_url = "swift://container/input"
        input_source = u.create_data_source(input_url,
                                            name="input",
                                            id=uuidutils.generate_uuid())

        output_source = u.create_data_source(
            "swift://container/output.%JOB_EXEC_ID%",
            name="output",
            id=uuidutils.generate_uuid())
        output_url = "swift://container/output." + job_exec_id

        by_name = {'input': input_source, 'output': output_source}
        by_id = {input_source.id: input_source,
                 output_source.id: output_source}

        # Pretend to be the database
        def _get_all(ctx, **kwargs):
            name = kwargs.get('name')
            name_list = [by_name[name]] if name in by_name else []
            ds_id = kwargs.get('id')
            id_list = [by_id[ds_id]] if ds_id in by_id else []
            return list(set(name_list + id_list))

        data_source_get_all.side_effect = _get_all

        # Substitution enabled: name and uuid references are resolved
        job_configs = {
            'configs': {
                job_utils.DATA_SOURCE_SUBST_NAME: True,
                job_utils.DATA_SOURCE_SUBST_UUID: True
            },
            'args': [name_ref, output_source.id, input_source.id]
        }
        urls = {}
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, urls)
        self.assertEqual(2, len(ds))
        self.assertEqual([input_source.url, output_url, input_source.url],
                         nc['args'])

        # Substitution not enabled
        job_configs['configs'] = {
            job_utils.DATA_SOURCE_SUBST_NAME: False,
            job_utils.DATA_SOURCE_SUBST_UUID: False
        }
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, {})
        self.assertEqual(0, len(ds))
        self.assertEqual(job_configs['args'], nc['args'])
        self.assertEqual(job_configs['configs'], nc['configs'])

        # Substitution enabled but no values to modify
        job_configs['configs'] = {
            job_utils.DATA_SOURCE_SUBST_NAME: True,
            job_utils.DATA_SOURCE_SUBST_UUID: True
        }
        job_configs['args'] = ['val1', 'val2', 'val3']
        ds, nc = job_utils.resolve_data_source_references(
            job_configs, job_exec_id, {})
        self.assertEqual(0, len(ds))
        # Consistent assertEqual(expected, actual) ordering throughout.
        self.assertEqual(job_configs['args'], nc['args'])
        self.assertEqual(job_configs['configs'], nc['configs'])