def __init__(self, jobname):
        Job.__init__(self)
        global s
        lock.acquire()
        s+=1
        lock.release()
        self.jobname = jobname
        self.state = None
        
        def add_a_child(parent):
            print("Adding a child job for %s" % parent, file=sys.stderr)
            JobPool().enqueue_job(TestJob("%s.child" % parent))
            # print("Added a child for:", parent)

        def a_very_bad_callback():
            raise Exception("A bad callback raises an exception.")
        
        ''' Make a random subset (about 75%) of these jobs child-bearing. '''
        if random() < 0.75:
            self.add_call_Back(lambda result, parent=self.jobname: add_a_child(parent))

        if random() < 0.1:
            self.add_call_Back(lambda result: a_very_bad_callback())
            
        self.add_call_Back(self.print_res)
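The snippet above assumes a Job base class that stores callbacks registered with add_call_Back and invokes them with the job's result once the job finishes. The following is a minimal, self-contained sketch of that pattern; SketchJob and its run() method are illustrative stand-ins, not SEPP's actual scheduler classes.

import sys

class SketchJob(object):
    ''' Hypothetical stand-in for the Job base class used above. '''
    def __init__(self):
        self.callbacks = []
        self.result = None

    def add_call_Back(self, callback):
        # Callbacks are invoked with the job's result after run() completes.
        self.callbacks.append(callback)

    def run(self):
        # A real job would do its work here; the sketch just fabricates a result.
        self.result = "done"
        for callback in self.callbacks:
            callback(self.result)
        return self.result

job = SketchJob()
job.add_call_Back(lambda result: print("callback saw: %s" % result, file=sys.stderr))
job.run()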
Example #2
    def __init__(self, jobname):
        Job.__init__(self)
        global s
        lock.acquire()
        s += 1
        lock.release()
        self.jobname = jobname
        self.state = None

        def add_a_child(parent):
            print("Adding a child job for %s" % parent, file=sys.stderr)
            JobPool().enqueue_job(TestJob("%s.child" % parent))
            # print("Added a child for:", parent)

        def a_very_bad_callback():
            raise Exception("A bad callback raises an exception.")

        ''' Make a random subset (about 75%) of these jobs child-bearing. '''
        if random() < 0.75:
            self.add_call_Back(
                lambda result, parent=self.jobname: add_a_child(parent))

        if random() < 0.1:
            self.add_call_Back(lambda result: a_very_bad_callback())

        self.add_call_Back(self.print_res)
Example #3
 def __init__(self, jobtype, path=None, **kwargs):
     Job.__init__(self)
     self.job_type = jobtype
     self._id = None  # _process id for this job
     self._kwargs = dict(kwargs)
     self._process = None
     ''' The following will contain the stdout and stderr values *only if*
     those streams are piped. Otherwise, the user should read the output
     files if needed.
     '''
     self.stdoutdata = self.stderrdata = None
     ''' When the following is something other than None, it is piped into
     the subprocess.
     '''
     self.stdindata = None
     # Setting this variable tells JobPool that errors in this job can be
     # ignored when waiting for the results of all jobs to finish.
     self.ignore_error = False
     self.fake_run = False
     self.attributes = dict()
     if path:
         self.path = path
     else:
         self.path = sepp.config.options().__getattribute__(
             self.job_type).path
     self.results_on_temp = False
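Below is a brief, hedged sketch of how the piping fields above (stdoutdata, stderrdata, stdindata) might be consumed when the external command is launched. The run_sketch function and its sample invocation are assumptions for illustration, not the actual run implementation of the class this __init__ belongs to; only the "piped vs. not piped" behavior mirrors the comments in the snippet.

import subprocess
import sys

def run_sketch(invocation, stdindata=None, pipe_output=True):
    ''' Launch `invocation`; pipe stdin only when stdindata is given and
    capture stdout/stderr only when pipe_output is True. '''
    stdin = subprocess.PIPE if stdindata is not None else None
    stdout = subprocess.PIPE if pipe_output else None
    stderr = subprocess.PIPE if pipe_output else None
    process = subprocess.Popen(invocation, stdin=stdin, stdout=stdout,
                               stderr=stderr, universal_newlines=True)
    # communicate() returns None for any stream that was not piped, matching
    # the "*only if* those streams are piped" note in the snippet above.
    stdoutdata, stderrdata = process.communicate(stdindata)
    return process.returncode, stdoutdata, stderrdata

returncode, out, err = run_sketch([sys.executable, "-c", "print('hello')"])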
Example #4
 def __init__(self, jobtype, **kwargs):        
     Job.__init__(self)
     self.job_type = jobtype
     self._id = None #_process id for this job
     self._kwargs = dict(kwargs)
     self._process = None
     ''' The following will contain the stdout and stderr values *only if*
     those streams are piped. Otherwise, the user should read the output
     files if needed.
     '''
     self.stdoutdata = self.stderrdata = None
     ''' When the following is something other than None, it is piped into
     the subprocess.
     '''
     self.stdindata = None        
     # Setting this variable tells JobPool that errors in this job can be
     # ignored when waiting for the results of all jobs to finish.
     self.ignore_error = False
     self.fake_run = False
     self.attributes = dict()
     self.path = sepp.config.options().__getattribute__(self.job_type).path         
Example #5
 def __init__(self, problem):
     Job.__init__(self)
     problem.add_job(self.type, self)
     self.problem_name = "level " + str(problem.level)
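Example #5 shows only the job side of the registration: it expects `problem` to expose a `level` attribute and an add_job(job_type, job) method. The sketch below is a hypothetical object that makes that expected interface concrete; SketchProblem is illustrative and not SEPP's actual Problem class.

class SketchProblem(object):
    ''' Hypothetical object exposing the interface Example #5 relies on. '''
    def __init__(self, level):
        self.level = level
        self.jobs = {}

    def add_job(self, job_type, job):
        # Keep the job in a per-type registry so it can be retrieved later.
        self.jobs[job_type] = job

problem = SketchProblem(level=1)
problem.add_job("placeholder_type", object())  # "placeholder_type" is illustrative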