Example #1
    def submit_jobs(self, job_files):
        task = self.task
        pool = get_param(task.htcondor_pool)
        scheduler = get_param(task.htcondor_scheduler)

        # prepare objects for dumping intermediate submission data
        dump_freq = task.htcondor_dump_intermediate_submission_data()
        if dump_freq and not is_number(dump_freq):
            dump_freq = 50

        # progress callback to inform the scheduler
        def progress_callback(i, job_ids):
            job_num = i + 1
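
            # htcondor responds with a list of job ids per submission, so take
            # the first one as long as batched submission is not used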
            job_id = job_ids[0]

            # set the job id early
            self.submission_data.jobs[job_num]["job_id"] = job_id

            # log a message every 25 jobs
            if job_num in (1, len(job_files)) or job_num % 25 == 0:
                task.publish_message("submitted {}/{} job(s)".format(
                    job_num, len(job_files)))

            # dump intermediate submission data with a certain frequency
            if dump_freq and job_num % dump_freq == 0:
                self.dump_submission_data()

        return self.job_manager.submit_batch(job_files,
                                             pool=pool,
                                             scheduler=scheduler,
                                             retries=3,
                                             threads=task.threads,
                                             callback=progress_callback)
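
For context, the pool, scheduler, and dump-frequency values above are read from hooks on the task itself. Below is a minimal sketch of the task side, assuming law's usual contrib loading convention; the class name and the returned frequency are illustrative, not taken from the project:

import law
law.contrib.load("htcondor")

class MyAnalysisTask(law.htcondor.HTCondorWorkflow, law.Task):

    def htcondor_dump_intermediate_submission_data(self):
        # a number enables dumping intermediate submission data every that
        # many jobs; a non-numeric truthy value falls back to the default
        # of 50 applied in the snippet above
        return 100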
Example #2
File: remote.py  Project: bfis/law
    def submit_jobs(self, job_files, **kwargs):
        task = self.task

        # prepare objects for dumping intermediate submission data
        dump_freq = self._get_task_attribute(
            "dump_intermediate_submission_data")()
        if dump_freq and not is_number(dump_freq):
            dump_freq = 50

        # progress callback to inform the scheduler
        def progress_callback(i, job_id):
            job_num = i + 1

            # some job managers respond with a list of job ids per submission (e.g. htcondor, slurm)
            # batched submission is not yet supported, so get the first id
            if isinstance(job_id, list):
                job_id = job_id[0]

            # set the job id early
            self.submission_data.jobs[job_num]["job_id"] = job_id

            # log a message every 25 jobs
            if job_num in (1, len(job_files)) or job_num % 25 == 0:
                task.publish_message("submitted {}/{} job(s)".format(
                    job_num, len(job_files)))

            # dump intermediate submission data with a certain frequency
            if dump_freq and job_num % dump_freq == 0:
                self.dump_submission_data()

        # get job kwargs for submission and merge with passed kwargs
        submit_kwargs = self._get_job_kwargs("submit")
        submit_kwargs = merge_dicts(submit_kwargs, kwargs)

        return self.job_manager.submit_batch(job_files,
                                             retries=3,
                                             threads=task.threads,
                                             callback=progress_callback,
                                             **submit_kwargs)
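
The final merge gives kwargs passed by the caller precedence over the task-level submission defaults returned by _get_job_kwargs("submit"). A toy illustration of that order, assuming law.util.merge_dicts behaves like successive dict.update() calls (the pool and scheduler values are made up):

from law.util import merge_dicts

task_defaults = {"pool": "default_pool", "scheduler": "sched1.example.org"}
call_kwargs = {"scheduler": "sched2.example.org"}

submit_kwargs = merge_dicts(task_defaults, call_kwargs)
# -> {"pool": "default_pool", "scheduler": "sched2.example.org"}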
Example #3
    def _submit(self, job_files, **kwargs):
        task = self.task

        # job_files is an ordered mapping job_num -> {"job": PATH, "log": PATH/None}, get keys and
        # values for faster lookup by numeric index
        job_nums = list(job_files.keys())
        job_files = [f["job"] for f in six.itervalues(job_files)]

        # prepare objects for dumping intermediate submission data
        dump_freq = self._get_task_attribute("dump_intermediate_submission_data", True)()
        if dump_freq and not is_number(dump_freq):
            dump_freq = 50

        # get job kwargs for submission and merge with passed kwargs
        submit_kwargs = self._get_job_kwargs("submit")
        submit_kwargs = merge_dicts(submit_kwargs, kwargs)

        # progress callback to inform the scheduler
        def progress_callback(i, job_id):
            job_num = job_nums[i]

            # some job managers respond with a list of job ids per submission (e.g. htcondor, slurm)
            # so get the first id as long as batched submission is not yet supported
            if isinstance(job_id, list) and not self.job_manager.chunk_size_submit:
                job_id = job_id[0]

            # set the job id early
            self.submission_data.jobs[job_num]["job_id"] = job_id

            # log a message every 25 jobs
            if i in (0, len(job_files) - 1) or (i + 1) % 25 == 0:
                task.publish_message("submitted {}/{} job(s)".format(i + 1, len(job_files)))

            # dump intermediate submission data with a certain frequency
            if dump_freq and (i + 1) % dump_freq == 0:
                self.dump_submission_data()

        return self.job_manager.submit_batch(job_files, retries=3, threads=task.threads,
            callback=progress_callback, **submit_kwargs)
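
The shape of job_files expected by this variant, and the key/value split at the top of _submit(), can be reproduced in isolation; the paths below are made up:

import six

# an ordered mapping job_num -> {"job": PATH, "log": PATH/None}
job_files = {
    1: {"job": "/tmp/jobs/job_1.json", "log": "/tmp/jobs/job_1.log"},
    2: {"job": "/tmp/jobs/job_2.json", "log": None},
}

# the same split performed above: parallel lists allow fast lookup of the
# job number belonging to the i-th submitted file
job_nums = list(job_files.keys())  # [1, 2]
job_paths = [f["job"] for f in six.itervalues(job_files)]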
Example #4
    def submit_jobs(self, job_files):
        task = self.task

        # delegate the voms proxy to all endpoints
        if self.delegation_ids is None and callable(task.glite_delegate_proxy):
            self.delegation_ids = []
            for ce in task.glite_ce:
                endpoint = get_ce_endpoint(ce)
                self.delegation_ids.append(task.glite_delegate_proxy(endpoint))

        # prepare objects for dumping intermediate submission data
        dump_freq = task.glite_dump_intermediate_submission_data()
        if dump_freq and not is_number(dump_freq):
            dump_freq = 50

        # progress callback to inform the scheduler
        def progress_callback(i, job_id):
            job_num = i + 1

            # set the job id early
            self.submission_data.jobs[job_num]["job_id"] = job_id

            # log a message every 25 jobs
            if job_num in (1, len(job_files)) or job_num % 25 == 0:
                task.publish_message("submitted {}/{} job(s)".format(
                    job_num, len(job_files)))

            # dump intermediate submission data with a certain frequency
            if dump_freq and job_num % dump_freq == 0:
                self.dump_submission_data()

        return self.job_manager.submit_batch(job_files,
                                             ce=task.glite_ce,
                                             delegation_id=self.delegation_ids,
                                             retries=3,
                                             threads=task.threads,
                                             callback=progress_callback)
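
The delegation loop above derives one endpoint per configured computing element before delegating the voms proxy to it. A rough sketch of what get_ce_endpoint is assumed to return; the CE string and the split logic are illustrative and not the actual law implementation:

# a CREAM CE string typically carries a queue suffix after the endpoint
ce = "grid-ce.example.org:8443/cream-pbs-myqueue"
endpoint = ce.split("/", 1)[0]  # assumed result of get_ce_endpoint(ce)
# -> "grid-ce.example.org:8443"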