def persist_input(self, args_tuple, kwargs_dict, filtered_args_dict):
    """Persist this call's input description to the job's work directory.

    First delegates to ``DirectoryJob.persist_input`` so the base class
    writes its own records, then additionally dumps a pickle describing
    the full call (function object, its ``version_info``, and the raw
    positional/keyword arguments) as ``input.pkl``.
    """
    DirectoryJob.persist_input(self, args_tuple, kwargs_dict,
                               filtered_args_dict)
    # NOTE(review): self.func.version_info is assumed to exist on the
    # wrapped function object -- confirm against the decorator that
    # produces it.
    record = {
        'func': self.func,
        'version_info': self.func.version_info,
        'args': args_tuple,
        'kwargs': kwargs_dict,
    }
    numpy_pickle.dump(record, pjoin(self._work_path, 'input.pkl'))
def __init__(self, store, func, func_path, func_hash, job_hash):
    """Set up a job rooted inside the store's jobs directory.

    The job directory is
    ``<store_path>/<JOBS_DIR_NAME>/<func_path>/<func_hash>/<job_hash>``;
    base-class settings (logger, save_npy, mmap_mode) are taken from
    ``store``.
    """
    self.store_path = store.store_path
    job_dir = pjoin(self.store_path, JOBS_DIR_NAME,
                    func_path, func_hash, job_hash)
    DirectoryJob.__init__(self,
                          job_path=job_dir,
                          store=store,
                          func=func,
                          logger=store.logger,
                          save_npy=store.save_npy,
                          mmap_mode=store.mmap_mode)
    self.job_hash = job_hash
    # Job-id entries are sharded on the first two hash characters
    # (presumably a link back to the job dir -- confirm elsewhere).
    self._jobid_link = pjoin(self.store_path, IDS_DIR_NAME,
                             job_hash[:2], job_hash[2:])
def __init__(self, store, func, func_path, func_hash, job_hash):
    """Construct the job and derive its on-disk locations from *store*.

    Builds the per-job directory under ``JOBS_DIR_NAME`` and the sharded
    job-id path under ``IDS_DIR_NAME``, forwarding the store's logger,
    ``save_npy`` flag and ``mmap_mode`` to the ``DirectoryJob`` base.
    """
    store_root = store.store_path
    self.store_path = store_root
    DirectoryJob.__init__(
        self,
        job_path=pjoin(store_root, JOBS_DIR_NAME,
                       func_path, func_hash, job_hash),
        store=store,
        func=func,
        logger=store.logger,
        save_npy=store.save_npy,
        mmap_mode=store.mmap_mode)
    self.job_hash = job_hash
    # Two-character prefix sharding keeps the ids directory shallow.
    prefix, rest = job_hash[:2], job_hash[2:]
    self._jobid_link = pjoin(store_root, IDS_DIR_NAME, prefix, rest)
# NOTE(review): this definition is TRUNCATED in the source -- it ends with a
# dangling ``else:`` and the collapsed one-line formatting lets the inline
# comments swallow the tokens after them, so the remainder of the body is
# missing from this view. Left byte-identical rather than guessing at the
# missing part. What is visible: if the job is already computed, it defers to
# DirectoryJob.load_or_lock and fails hard (ClusterJobError) on MUST_COMPUTE,
# which the comment attributes to unpickling errors; otherwise it uses
# os.makedirs(self.job_path) as the lock, treating errno.EEXIST as
# "already running/dispatched". Uses Python 2 ``except OSError, e`` syntax.
def load_or_lock(self, blocking=True, pre_load_hook=_noop, post_load_hook=_noop): if self.is_computed(): status, output = DirectoryJob.load_or_lock(self, blocking, pre_load_hook, post_load_hook) if status == MUST_COMPUTE: # This happens on unpickling errors; we # fail hard on those instead raise ClusterJobError('Could not unpickle: %s/output.pkl' % self.job_path) return (status, output) else: output = None # Make output dir -- use this as our lock to figure out # whether it is running/dispatched already, or needs # to be computed. try: os.makedirs(self.job_path) except OSError, e: if e.errno == errno.EEXIST: running = True else: raise else:
# NOTE(review): duplicate of the preceding load_or_lock (double-quoted string
# variant), equally TRUNCATED -- the body ends with a dangling ``else:`` and
# the inline comments in the collapsed line consume trailing tokens, so the
# rest of the implementation is not visible here. Left byte-identical rather
# than guessing. Visible behavior: computed jobs delegate to
# DirectoryJob.load_or_lock and raise ClusterJobError on MUST_COMPUTE; the
# uncomputed path uses os.makedirs(self.job_path) as a lock, with
# errno.EEXIST meaning the job is already running/dispatched. Python 2
# ``except OSError, e`` syntax.
def load_or_lock(self, blocking=True, pre_load_hook=_noop, post_load_hook=_noop): if self.is_computed(): status, output = DirectoryJob.load_or_lock(self, blocking, pre_load_hook, post_load_hook) if status == MUST_COMPUTE: # This happens on unpickling errors; we # fail hard on those instead raise ClusterJobError("Could not unpickle: %s/output.pkl" % self.job_path) return (status, output) else: output = None # Make output dir -- use this as our lock to figure out # whether it is running/dispatched already, or needs # to be computed. try: os.makedirs(self.job_path) except OSError, e: if e.errno == errno.EEXIST: running = True else: raise else:
def persist_input(self, args_tuple, kwargs_dict, filtered_args_dict):
    """Write this job's input record.

    Runs the ``DirectoryJob`` persistence first, then pickles the call
    description -- the function, its ``version_info``, and the original
    args/kwargs -- into ``input.pkl`` under the work path.
    """
    DirectoryJob.persist_input(
        self, args_tuple, kwargs_dict, filtered_args_dict)
    target = pjoin(self._work_path, "input.pkl")
    numpy_pickle.dump(
        dict(func=self.func,
             version_info=self.func.version_info,
             args=args_tuple,
             kwargs=kwargs_dict),
        target)