Example 1: ArgoJob.deserialize
    def deserialize(self, text_string):
        # populate this object's attributes from the JSON dictionary
        try:
            self.__dict__ = deserialize(text_string)
        except Exception as exc:
            tolog(' received exception while converting json string to ArgoJob: ' + str(exc))
            raise

        # convert unicode strings to strings
        self.preprocess = convert_unicode_string(self.preprocess)
        self.preprocess_args = convert_unicode_string(self.preprocess_args)
        self.postprocess = convert_unicode_string(self.postprocess)
        self.postprocess_args = convert_unicode_string(self.postprocess_args)
        self.input_url = convert_unicode_string(self.input_url)
        self.output_url = convert_unicode_string(self.output_url)
        self.username = convert_unicode_string(self.username)
        self.group_identifier = convert_unicode_string(self.group_identifier)
        self.job_status_routing_key = convert_unicode_string(
            self.job_status_routing_key)

        # convert the list of job dictionaries into BalsamJob objects
        tmp_jobs = []
        for job_dictionary in self.jobs:
            tmp_job = BalsamJob()
            tmp_job.__dict__ = job_dictionary
            tmp_jobs.append(tmp_job)
        # replace the raw dictionaries with the rebuilt job objects
        self.jobs = tmp_jobs
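
For reference, neither the module-level deserialize helper nor convert_unicode_string is shown on this page. A minimal sketch of what they plausibly do, assuming the payload is a flat JSON object (both helper bodies are illustrative assumptions, not the project's actual code):

import json
import sys

def deserialize(text_string):
    # Parse a JSON document into a plain dictionary; json.loads raises
    # ValueError on malformed input, which the caller above logs and re-raises.
    return json.loads(text_string)

def convert_unicode_string(value):
    # On Python 2, json.loads yields unicode objects; encode them to str.
    # On Python 3, str is already unicode, so this is effectively a no-op.
    if sys.version_info[0] == 2 and isinstance(value, unicode):  # noqa: F821
        return value.encode('utf-8')
    return value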
Example 2: get_argo_job
    def get_argo_job(self, job):

        ##-----------------------
        # create argo job
        ##-----------------------
        argo_job = ArgoJob()
        argo_job.input_url = None  # default; overridden below when self.input_url is set
        if self.input_url is not None:
            argo_job.input_url = self.input_url
        
        argo_job.output_url = self.grid_ftp_protocol + self.grid_ftp_server + self.job_path
        if self.output_url is not None:
            argo_job.output_url = self.output_url
        
        argo_job.username           = self.username
        argo_job.group_identifier   = self.group_identifier
        
        ##-----------------------
        # create get alpgen input cards balsam job
        ##-----------------------
        
        input_file_imode0 = self.base_filename + '.input.0'
        input_file_imode1 = self.base_filename + '.input.1'
        input_file_imode2 = self.base_filename + '.input.2'
        
        input_cards_job = BalsamJob()
        input_cards_job.executable          = self.athena_input_card_executable
        input_cards_job.executable_args     = ('-e ' + self.ecm 
                                            + ' -r ' + self.run_number 
                                            + ' -o ' + self.job_config 
                                            + ' -j ' + self.evgen_job_opts)
        input_cards_job.output_files        = [input_file_imode0,
                                               input_file_imode1,
                                               input_file_imode2,
                                               self.athena_postprocess_log]
        input_cards_job.nodes               = 1
        input_cards_job.processes_per_node  = 1
        input_cards_job.wall_minutes        = 0 # runs on a Condor cluster, so no wall-time request is needed
        input_cards_job.username            = self.username
        input_cards_job.target_site         = self.serial_site
        input_cards_job.postprocess         = self.athena_postprocess
        input_cards_job.postprocess_args    = (' -i ' + self.athena_input_card_name + ' -p ' + self.process 
                                               + ' -n ' + str(self.evtgen_phase1_number_events)
                                               + ' --log-filename=' + str(self.athena_postprocess_log))
        if self.warmup_phase0_number_events is not None:
            input_cards_job.postprocess_args += ' --wmp-evts-itr=' + str(self.warmup_phase0_number_events)
        if self.warmup_phase0_number_iterations is not None:
            input_cards_job.postprocess_args += ' --wmp-nitr=' + str(self.warmup_phase0_number_iterations)
        if self.warmup_phase1_number_events is not None:
            input_cards_job.postprocess_args += ' --wmp-evts=' + str(self.warmup_phase1_number_events)

        argo_job.add_job(input_cards_job)
        
        ##-----------------------
        # create warm-up job
        ##-----------------------
        
        # create grid filenames
        grid1 = self.base_filename + '.grid1'
        grid2 = self.base_filename + '.grid2'
        
        # create warmup balsam job
        warmup = BalsamJob()
        warmup.executable          = self.process + 'gen90_mpi'
        warmup.executable_args     = input_file_imode0
        warmup.input_files         = [input_file_imode0]
        warmup.output_files        = [grid1,grid2]
        warmup.nodes               = 1
        warmup.processes_per_node  = 1
        warmup.wall_minutes        = 0 # runs on a Condor cluster, so no wall-time request is needed
        warmup.username            = self.username
        warmup.target_site         = self.serial_site
        warmup.preprocess          = self.warmup_preprocess
        
        argo_job.add_job(warmup)
        
        ##-----------------------
        # create event generation job
        ##-----------------------
        
        # create executable
        alpgen_exe = self.process + 'gen90_mpi_ramdisk_nomrstpdfs'
        if 'argo_cluster' in self.parallel_site: # no ramdisk needed on argo_cluster
            alpgen_exe = self.process + 'gen90_mpi'
        
        # create filenames
        unw      = self.base_filename + '.unw.gz'
        unw_par  = self.base_filename + '_unw.par'
        wgt      = self.base_filename + '.wgt'  # defined but not used below
        wgt_par  = self.base_filename + '.par'  # defined but not used below
        directoryList_before = 'directoryList_before.txt'
        directoryList_after  = 'directoryList_after.txt'
        
        # create event gen balsam job
        evtgen = BalsamJob()
        evtgen.executable          = self.evtgen_executable
        evtgen.executable_args     = (alpgen_exe + ' ' + input_file_imode1 + ' ' 
                                      + input_file_imode2 + ' ' + str(self.evtgen_processes_per_node))
        evtgen.input_files         = [grid1,
                                      grid2,
                                      input_file_imode1,
                                      input_file_imode2]
        evtgen.output_files        = [unw,
                                      unw_par,
                                      directoryList_before,
                                      directoryList_after,
                                      self.evtgen_postprocess + '.out',
                                      self.evtgen_postprocess + '.err',
                                     ]
        evtgen.preprocess          = self.evtgen_preprocess
        evtgen.postprocess         = self.evtgen_postprocess
        evtgen.postprocess_args    = self.base_filename
        evtgen.nodes               = self.evtgen_nodes
        evtgen.processes_per_node  = self.evtgen_processes_per_node
        evtgen.wall_minutes        = self.evtgen_wall_minutes
        evtgen.username            = self.username
        evtgen.scheduler_args      = self.evtgen_scheduler_args
        evtgen.target_site         = self.parallel_site
        
        argo_job.add_job(evtgen)
        
        return argo_job
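
The ArgoJob and BalsamJob containers used above are not defined on this page. A minimal sketch of the behavior this example relies on: add_job keeps an ordered jobs list, and a serialize counterpart would produce the JSON consumed by deserialize in Example 1 (the class shapes and the serialize method are assumptions for illustration):

import json

class BalsamJob(object):
    # Plain attribute bag describing one pipeline step; the example above
    # fills in executable, input_files, output_files, nodes, target_site, etc.
    pass

class ArgoJob(object):
    def __init__(self):
        self.jobs = []  # BalsamJobs run in the order they were added

    def add_job(self, job):
        # get_argo_job adds three steps in order: input cards, warmup,
        # and event generation.
        self.jobs.append(job)

    def serialize(self):
        # Mirror image of deserialize in Example 1: flatten each BalsamJob
        # to its attribute dictionary before dumping the whole object.
        state = dict(self.__dict__)
        state['jobs'] = [job.__dict__ for job in self.jobs]
        return json.dumps(state)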