Example #1
def process_images(random_line):
    conflict = 0
    tic = time.perf_counter()  # time.clock() was removed in Python 3.8
    filepath = str(random_line).replace(
        '/home/gaobing/projects/rpp-ycoady/spectral', '/spectral', 1)
    filename = filepath.strip()
    # The level-2 filename is derived from the level-1 name; if outdir is not
    # provided, the output goes to the same folder as the level-1 file.
    print("new file name is ", filename)
    lock = SoftFileLock(prcessing_lockname, timeout=45)
    try:
        # Record the line in the shared processing log, waiting up to 35 s
        # for the soft lock.
        with lock.acquire(timeout=35):
            with open(prcessing_fn, 'a') as outfile:
                outfile.write(random_line)
    except Timeout:
        print("timed out - prcessing_fn")
        time.sleep(5)
    try:
        user_algorithm(filename)
    except OutputExists:
        print("found this one exists in destination")
        conflict = conflict + 1
        return
    toc = time.perf_counter()
    print('Processing time for', random_line, '(seconds):')
    print(toc - tic)
    print('conflict number ', conflict)
Example #2
 def add(self, dose_file):
     lockfile = dose_file + ".lock"
     dose = None
     n_primaries = 0
     t0 = datetime.now()
     logger.debug("lockfile exists" if os.path.
                  exists(lockfile) else "lockfile does not exist")
     lock = SoftFileLock(lockfile)
     try:
         # TODO: the length of the timeout should maybe be configured in the system configuration
         with lock.acquire(timeout=3):
             t1 = datetime.now()
             logger.debug("acquiring lock file took {} seconds".format(
                 (t1 - t0).total_seconds()))
             ##########################
             n_primaries = self.get_nprimaries(dose_file)
             if n_primaries < 1:
                 logger.warning(
                     f"dose file seems to be based on too few primaries ({n_primaries})"
                 )
             elif bool(self.mass) and bool(self.mask):
                 simdose = itk.imread(dose_file)
                 logger.debug(
                     "resampling dose with size {} using mass file of size {} to target size {}"
                     .format(itk.size(simdose), itk.size(self.mass),
                             itk.size(self.mask)))
                 dose = mass_weighted_resampling(simdose, self.mass,
                                                 self.mask)
                 del simdose
             else:
                 dose = itk.imread(dose_file)
                 logger.debug("read dose with size {}".format(
                     itk.size(dose)))
             t2 = datetime.now()
             logger.debug(
                 "acquiring dose data {} file took {} seconds".format(
                     os.path.basename(dose_file),
                     (t2 - t1).total_seconds()))
             if self.wmin > n_primaries:
                 self.wmin = n_primaries
             if self.wmax < n_primaries:
                 self.wmax = n_primaries
     except Timeout:
         logger.warning(
             "failed to acquire lock for {} for 3 seconds, giving up for now"
             .format(dose_file))
         return
     if not bool(dose):
         logger.warn("skipping {}".format(dose_file))
         return
     adose = itk.array_from_image(dose)
     if adose.shape != self.dosesum.shape:
         raise RuntimeError(
             "PROGRAMMING ERROR: dose shape {} differs from expected shape {}"
             .format(adose.shape, self.dosesum.shape))
     self.dosesum += adose  # n_primaries * (adose / n_primaries)
     self.dose2sum += adose**2 / n_primaries  # n_primaries * (adose / n_primaries)**2
     self.weightsum += n_primaries
     self.n += 1
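The running sums above follow standard weighted Monte Carlo bookkeeping: with per-file weight w = n_primaries and per-primary dose x = adose / w, `dosesum` accumulates sum(w * x), `dose2sum` accumulates sum(w * x**2), and `weightsum` accumulates sum(w). A minimal sketch of how a mean and variance could be recovered from these accumulators (the function names are illustrative, not from the source):

def mean_dose(dosesum, weightsum):
    # Weighted mean of the per-primary dose: sum(w * x) / sum(w).
    return dosesum / weightsum

def dose_variance(dosesum, dose2sum, weightsum):
    # Weighted sample variance: E[x^2] - E[x]^2, both weighted by n_primaries.
    m = dosesum / weightsum
    return dose2sum / weightsum - m ** 2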
Example #3
    def __init__(self, path, acquire_timeout=60*60, lock_timeout=15, expire=120):
        self.path = path
        self.mlock_path = self.path + '.2'

        self.expire = expire
        self.lock_timeout = lock_timeout
        self.acquire_timeout = acquire_timeout

        self.primary_locker = SoftFileLock(self.path, self.lock_timeout)
Example #4
def clean_up(random_line):

    lock = SoftFileLock(completed_lockname, timeout=35)
    try:
        # Record the line in the completed-files log, waiting up to 30 s
        # for the soft lock.
        with lock.acquire(timeout=30):
            print("lock acquired - complete_file")
            with open(completed_fn, 'a') as outfile:
                outfile.write(random_line)
    except Timeout:
        print("timed out - complete_file")
        time.sleep(5)
Example #5
def update_archival(outpath):
    # Add information to the "package slip" metadata.

    infofile = os.path.join(outpath, "package_slip.json")
    infofile_lock = os.path.join(outpath, "package_slip.json.lock")
    lock_timeout = 5
    lock = SoftFileLock(infofile_lock)

    try:
        with lock.acquire(timeout=lock_timeout):
            if os.path.isfile(infofile):
                # file exists, needs update
                help.status_note(['Going to update ', infofile], d=is_debug)
                with open(infofile, encoding='utf-8') as data_file:
                    data = json.load(data_file)
                    standard = archival_info['standards_used'][0]
                    if standard not in data['standards_used']:
                        data['standards_used'].append(standard)
            else:
                # file does not exist, needs to be created
                data = archival_info

            # in any case: write current data back to info file
            help.status_note(['Write info to ', infofile], d=is_debug)
            with open(infofile, 'w', encoding='utf-8') as outfile:
                # for json:
                output_data = json.dumps(data,
                                         sort_keys=True,
                                         indent=4,
                                         separators=(',', ': '))
                outfile.write(str(output_data))
                help.status_note([
                    str(os.stat(infofile).st_size), ' bytes written to ',
                    os.path.abspath(infofile)
                ],
                                 d=is_debug)
    except Timeout:
        help.status_note([
            'Cannot acquire lock within ', lock_timeout,
            ' seconds, raising exception!'
        ],
                         d=is_debug)
        raise
    except Exception as exc:
        help.status_note(str(exc), d=is_debug)
        raise
    finally:
        lock.release()
Example #6
 async def __aenter__(self):
     lock = SoftFileLock(self.lockfile)
     acquired_lock = False
     while not acquired_lock:
         try:
             # Non-blocking attempt: timeout=0 raises Timeout immediately
             # if another process holds the lock.
             lock.acquire(timeout=0)
             acquired_lock = True
         except Timeout:
             # Back off before polling again, doubling the interval up to a cap.
             await asyncio.sleep(self.timeout)
             if self.timeout <= 2:
                 self.timeout = self.timeout * 2
     self.lock = lock
     return self
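The `__aenter__` above stores the acquired lock on `self.lock`; the matching `__aexit__` is not shown in the excerpt, but a minimal counterpart (an assumption, not source code) only needs to release it:

async def __aexit__(self, exc_type, exc_value, traceback):
    # Release the soft lock whether or not the body raised an exception.
    self.lock.release()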
Example #7
 def save_data(self, key, value):
     """Store config flie"""
     datadict = {}
     if os.path.exists(self.data_file):
         with SoftFileLock("%s.lock" % self.data_file):
             with open(self.data_file, "rt") as file:
                 datadict = load(file)
     else:
         dirname = os.path.dirname(self.data_file)
         if not os.path.exists(dirname):
             mkdir_p(dirname)
      with SoftFileLock("%s.lock" % self.data_file):
          datadict[key] = value
          with open(self.data_file, "wt") as file:
              dump(datadict, file)
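Note that the lock is released between the read and the write above, so two concurrent `save_data` calls can interleave and one update can be lost. A sketch of a single-lock variant, assuming the same helpers the class already uses (`load`, `dump`, `mkdir_p`):

def save_data(self, key, value):
    """Store a key/value pair, holding one lock across the read-modify-write."""
    dirname = os.path.dirname(self.data_file)
    if not os.path.exists(dirname):
        mkdir_p(dirname)
    with SoftFileLock("%s.lock" % self.data_file):
        datadict = {}
        if os.path.exists(self.data_file):
            with open(self.data_file, "rt") as file:
                datadict = load(file)
        # Modify and write back while still holding the lock.
        datadict[key] = value
        with open(self.data_file, "wt") as file:
            dump(datadict, file)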
Example #8
    def __init__(self,
                 path,
                 acquire_timeout=4 * 60 * 60,
                 lock_timeout=15,
                 expire=120,
                 keep_alive_handler=None):
        self.path = path
        self.mlock_path = self.path + '.2'

        self.expire = expire
        self.lock_timeout = lock_timeout
        self.acquire_timeout = acquire_timeout

        self.primary_locker = SoftFileLock(self.path, self.lock_timeout)
        self.keep_alive_handler = keep_alive_handler
Example #9
def get_lock(path: str, timeout: int = 600):
    os.makedirs(os.path.dirname(path), exist_ok=True)

    lock_path = path + '.lock'
    logger.info('trying to acquire lock "%s"', lock_path)

    return SoftFileLock(lock_path, timeout=timeout)
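The returned lock is meant to be used as a context manager at the call site; for example (path and workload are illustrative):

with get_lock('/tmp/myapp/cache.db'):
    # The '.lock' file exists until this block exits.
    rebuild_cache()  # hypothetical critical section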
Example #10
    def write_dsn(self, wdmpath, dsn, data):
        """Write to self.wdmfp/dsn the time-series data."""
        dsn_desc = self.describe_dsn(wdmpath, dsn)
        tcode = dsn_desc["tcode"]
        tstep = dsn_desc["tstep"]
        tsfill = dsn_desc["tsfill"]

        data.fillna(tsfill, inplace=True)
        start_date = data.index[0]

        dstart_date = start_date.timetuple()[:6]
        llsdat = self._tcode_date(tcode, dstart_date)
        if dsn_desc["base_year"] > llsdat[0]:
            raise ValueError(
                tsutils.error_wrapper("""
The base year for this DSN is {0}.  All data to insert must be after the
base year.  Instead the first year of the series is {1}.
""".format(dsn_desc["base_year"], llsdat[0])))

        nval = len(data)
        lock = SoftFileLock(wdmpath + ".lock", timeout=30)
        with lock:
            wdmfp = self._open(wdmpath, 58)
            retcode = self.wdtput(wdmfp, dsn, tstep, llsdat, nval, 1, 0, tcode,
                                  data)
            self._close(wdmpath)
        self._retcode_check(retcode,
                            additional_info="wdtput file={0} DSN={1}".format(
                                wdmpath, dsn))
Example #11
    def _run_task(task_class, config, logger, metadata):
        config.set_logger(logger.get_with_module('config'))
        config.iteration_cursor = metadata["finished_iterations"]

        task = task_class(config, logger.get_with_module('task'), metadata)
        metadata_lock = SoftFileLock(metadata["task_dir"] / "metadata.json.lock")

        def save_func(finished_iterations):
            with metadata_lock:
                task.save(metadata["task_dir"])
                with open(str(metadata["task_dir"] / Path("metadata.json")), 'r') as handle:
                    data = json.load(handle)

                with open(str(metadata["task_dir"] / Path("metadata.json")), 'w') as handle:
                    data['saved_time'] = time.mktime(datetime.datetime.now().timetuple())
                    data['finished_iterations'] = finished_iterations
                    json.dump(data, handle)

            metadata["pipe"].send(PipeMsg.SAVED_FINISHED_ITERATIONS, {"saved_finished_iterations": finished_iterations, "saved_time": data['saved_time']})

        def checkpoint_func(finished_iterations):
            save_func(finished_iterations)
            checkpoint = TaskWrapper._create_checkpoint(metadata_lock, metadata["task_dir"], finished_iterations)
            return checkpoint

        if metadata["finished_iterations"] > 0:
            task.load(metadata["task_dir"])
        task.run(save_func, checkpoint_func)

        save_func(task.finished_iterations)
Example #12
def save(task_path: Path, result=None, task=None, name_prefix=None):
    """
    Save a :class:`~pydra.engine.core.TaskBase` object and/or results.

    Parameters
    ----------
    task_path : :obj:`Path`
        Write directory
    result : :obj:`Result`
        Result to pickle and write
    task : :class:`~pydra.engine.core.TaskBase`
        Task to pickle and write
    name_prefix : :obj:`str`
        Prefix prepended to the pickled output filenames
    """

    if task is None and result is None:
        raise ValueError("Nothing to be saved")

    if not isinstance(task_path, Path):
        task_path = Path(task_path)
    task_path.mkdir(parents=True, exist_ok=True)
    if name_prefix is None:
        name_prefix = ""

    lockfile = task_path.parent / (task_path.name + "_save.lock")
    with SoftFileLock(lockfile):
        if result:
            if task_path.name.startswith(
                    "Workflow") and result.output is not None:
                # copy files to the workflow directory
                result = copyfile_workflow(wf_path=task_path, result=result)
            with (task_path / f"{name_prefix}_result.pklz").open("wb") as fp:
                cp.dump(result, fp)
        if task:
            with (task_path / f"{name_prefix}_task.pklz").open("wb") as fp:
                cp.dump(task, fp)
Example #13
 async def _run(self, submitter=None, rerun=False, **kwargs):
     # self.inputs = dc.replace(self.inputs, **kwargs) don't need it?
     # output_spec needs to be set using set_output or at workflow initialization
     if self.output_spec is None:
         raise ValueError(
             "Workflow output cannot be None, use set_output to define output(s)"
         )
     checksum = self.checksum
     # Eagerly retrieve cached
     if not (rerun or self.task_rerun):
         result = self.result()
         if result is not None:
             return result
     # creating connections that were defined after adding tasks to the wf
     for task in self.graph.nodes:
         # if workflow has task_rerun=True and propagate_rerun=True,
         # it should be passed to the tasks
         if self.task_rerun and self.propagate_rerun:
             task.task_rerun = self.task_rerun
              # if the task is a wf, then propagate_rerun should also be set
             if is_workflow(task):
                 task.propagate_rerun = self.propagate_rerun
         task.cache_locations = task._cache_locations + self.cache_locations
         self.create_connections(task)
     # TODO add signal handler for processes killed after lock acquisition
     # adding info file with the checksum in case the task was cancelled
     # and the lockfile has to be removed
     with open(self.cache_dir / f"{self.uid}_info.json", "w") as jsonfile:
         json.dump({"checksum": checksum}, jsonfile)
     lockfile = self.cache_dir / (checksum + ".lock")
     self.hooks.pre_run(self)
     with SoftFileLock(lockfile):
          # Let only one equivalent process run
         odir = self.output_dir
         if not self.can_resume and odir.exists():
             shutil.rmtree(odir)
         cwd = os.getcwd()
          odir.mkdir(parents=False, exist_ok=self.can_resume)
         self.audit.start_audit(odir=odir)
         result = Result(output=None, runtime=None, errored=False)
         self.hooks.pre_run_task(self)
         try:
             self.audit.monitor()
             await self._run_task(submitter, rerun=rerun)
             result.output = self._collect_outputs()
         except Exception as e:
             record_error(self.output_dir, e)
             result.errored = True
             self._errored = True
             raise
         finally:
             self.hooks.post_run_task(self, result)
             self.audit.finalize_audit(result=result)
             save(odir, result=result, task=self)
              # removing the additional file with the checksum
             (self.cache_dir / f"{self.uid}_info.json").unlink()
             os.chdir(cwd)
     self.hooks.post_run(self, result)
     return result
Example #14
def test_cleanup_soft_lock(tmp_path: Path) -> None:
    # tests if the lock file is removed after use
    lock_path = tmp_path / "a"
    lock = SoftFileLock(str(lock_path))

    with lock:
        assert lock_path.exists()
    assert not lock_path.exists()
Example #15
 def _run(self, rerun=False, **kwargs):
     self.inputs = attr.evolve(self.inputs, **kwargs)
     self.inputs.check_fields_input_spec()
     checksum = self.checksum
     lockfile = self.cache_dir / (checksum + ".lock")
     # Eagerly retrieve cached - see scenarios in __init__()
     self.hooks.pre_run(self)
     with SoftFileLock(lockfile):
         if not (rerun or self.task_rerun):
             result = self.result()
             if result is not None:
                 return result
         # adding info file with the checksum in case the task was cancelled
         # and the lockfile has to be removed
         with open(self.cache_dir / f"{self.uid}_info.json",
                   "w") as jsonfile:
             json.dump({"checksum": self.checksum}, jsonfile)
         # Let only one equivalent process run
         odir = self.output_dir
         if not self.can_resume and odir.exists():
             shutil.rmtree(odir)
         cwd = os.getcwd()
          odir.mkdir(parents=False, exist_ok=self.can_resume)
         orig_inputs = attr.asdict(self.inputs)
         map_copyfiles = copyfile_input(self.inputs, self.output_dir)
         modified_inputs = template_update(self.inputs,
                                           self.output_dir,
                                           map_copyfiles=map_copyfiles)
         if modified_inputs:
             self.inputs = attr.evolve(self.inputs, **modified_inputs)
         self.audit.start_audit(odir)
         result = Result(output=None, runtime=None, errored=False)
         self.hooks.pre_run_task(self)
         try:
             self.audit.monitor()
             self._run_task()
             result.output = self._collect_outputs(output_dir=odir)
          except Exception:
              etype, evalue, etrace = sys.exc_info()
              traceback = format_exception(etype, evalue, etrace)
             record_error(self.output_dir, error=traceback)
             result.errored = True
             raise
         finally:
             self.hooks.post_run_task(self, result)
             self.audit.finalize_audit(result)
             save(odir, result=result, task=self)
             self.output_ = None
              # removing the additional file with the checksum
             (self.cache_dir / f"{self.uid}_info.json").unlink()
              # private "_" fields (function etc.) shouldn't change anyway, so drop them before restoring
             orig_inputs = dict((k, v) for (k, v) in orig_inputs.items()
                                if not k.startswith("_"))
             self.inputs = attr.evolve(self.inputs, **orig_inputs)
             os.chdir(cwd)
     self.hooks.post_run(self, result)
     return result
Example #16
 def __init__(self, storage_path):
     self.storage_path = storage_path
      # Check that the storage path exists.
     assert os.path.exists(
         self.full_path(COUNTER_DAT)
     ), "Storage path access error. Missing %s !" % self.full_path(
         COUNTER_DAT)
     # Lock files
     self.lock = SoftFileLock(self.full_path(LOCK_FILE))
Example #17
 def get_data(self, key):
     """Read options file"""
     if not os.path.exists(self.data_file):
         return None
     with SoftFileLock("%s.lock" % self.data_file):
         with open(self.data_file, "rt") as file:
             datadict = load(file)
      return datadict.get(key)
Example #18
def _run_run(run):
    """Actually run a run."""
    res = _execute(run.command)

    # check return codes
    if (res.returncode not in run.allowed_return_codes
            and run.allowed_return_codes != []):
        _print_warning("unexpected return code for command: " + res.args +
                       "\n" + res.stderr.strip() + "")
        return

    if run.stdout_file is None:
        return

    filename = run.stdout_file
    lockfile = filename + ".lock"
    os.makedirs(os.path.dirname(filename), exist_ok=True)

    # create new file with header
    if not os.path.isfile(filename) and (run.header_command is not None
                                         or run.header_string is not None):
        header = run.header_string
        if run.header_command is not None:
            header = _execute(run.header_command).stdout.strip()
        header = run.header_mod(header)

        with SoftFileLock(lockfile):
            if not os.path.isfile(filename):
                with open(filename, "w") as out:
                    print(header, file=out, flush=True)

    # write to stdout
    stdout = res.stdout.strip()
    mod = run.stdout_mod
    output = mod(stdout) if len(signature(mod).parameters) == 1 else mod(
        stdout, res)
    if run.stdout_res is not None:
        run.args["stdout"] = output
        output = deblob(run.stdout_res, run.args)

    with SoftFileLock(lockfile):
        with open(run.stdout_file, "a") as out:
            print(output, file=out, flush=True)
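The header block above is a check/lock/re-check idiom: the unlocked `isfile` test keeps the common path cheap, and the second test under the lock guarantees that only one process writes the header. Condensed:

if not os.path.isfile(filename):          # cheap, racy first check
    with SoftFileLock(lockfile):
        if not os.path.isfile(filename):  # authoritative re-check under the lock
            with open(filename, "w") as out:
                print(header, file=out, flush=True)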
Example #19
File: cluster.py Project: jgu2/jade
    def _do_action_under_lock_internal(lock_file,
                                       func,
                                       *args,
                                       timeout=LOCK_TIMEOUT,
                                       **kwargs):
        # Using this instead of FileLock because it will be used across nodes
        # on the Lustre filesystem.
        lock_acquisition_seconds = None
        lock = SoftFileLock(lock_file, timeout=timeout)
        try:
            start = time.time()
            lock.acquire(timeout=timeout)
            lock_acquisition_seconds = time.time() - start
        except Timeout:
            # Picked a default value such that this should not trip. If it does
            # trip under normal circumstances then we need to reconsider this.
            logger.error(
                "Failed to acquire file lock %s within %s seconds. hostname=%s",
                lock_file,
                timeout,
                socket.gethostname(),
            )
            raise

        if lock_acquisition_seconds > 5:
            logger.warning(
                "cluster lock acquisition for %s took %s seconds",
                func.__name__,
                lock_acquisition_seconds,
            )

        try:
            val = func(*args, **kwargs)
            lock.release()
            return val
        except Exception as exc:
            lock.release()
            # SoftFileLock always deletes the file, so create it again.
            # There is a small window where this won't work and another node takes over, so
            # callers need to handle the possibility.
            logger.exception(
                "An exception occurred while holding the Cluster lock. The state of the cluster "
                "is unknown.")
            try:
                fd = os.open(lock_file,
                             os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC)
                os.close(fd)
                logger.error("A deadlock will occur.")
            except (IOError, OSError):
                logger.exception(
                    "Unable to cause deadlock. Another node was promoted to submitter"
                )
            raise exc
Example #20
    def _lock():
        """Return a context manager for acquiring ``_lock_filename()``.

        For example::

            with BatteryController._lock():
                status = BatteryController._read_state()
                # --- Mutate status here ---
                BatteryController._poll(status)
        """
        return SoftFileLock(BatteryController._lock_filename(), timeout=30)
Example #21
def loadpkl(infile):
    """Load a zipped or plain cPickled file."""
    infile = Path(infile)
    fmlogger.debug('Loading pkl: %s', infile)
    pklopen = gzip.open if infile.suffix == '.pklz' else open

    with SoftFileLock('%s.lock' % infile):
        with pklopen(str(infile), 'rb') as pkl_file:
            pkl_contents = pkl_file.read()

    pkl_metadata = None

    # Look if pkl file contains version metadata
    idx = pkl_contents.find(b'\n')
    if idx >= 0:
        try:
            pkl_metadata = json.loads(pkl_contents[:idx])
        except (UnicodeDecodeError, json.JSONDecodeError):
            # Could not get version info
            pass
        else:
            # On success, skip JSON metadata
            pkl_contents = pkl_contents[idx + 1:]

    # Pickle files may contain relative paths that must be resolved relative
    # to the working directory, so use indirectory while attempting to load
    unpkl = None
    try:
        with indirectory(infile.parent):
            unpkl = pickle.loads(pkl_contents)
    except UnicodeDecodeError:
        # Was this pickle created with Python 2.x?
        with indirectory(infile.parent):
            unpkl = pickle.loads(pkl_contents, fix_imports=True, encoding='utf-8')
        fmlogger.info('Successfully loaded pkl in compatibility mode.')
    # Unpickling problems
    except Exception as e:
        if pkl_metadata and 'version' in pkl_metadata:
            from nipype import __version__ as version
            if pkl_metadata['version'] != version:
                fmlogger.error("""\
Attempted to open a results file generated by Nipype version %s, \
with an incompatible Nipype version (%s)""", pkl_metadata['version'], version)
                raise e
        fmlogger.warning("""\
No metadata was found in the pkl file. Make sure you are currently using \
the same Nipype version from the generated pkl.""")
        raise e

    if unpkl is None:
        raise ValueError('Loading %s resulted in None.' % infile)

    return unpkl
Example #22
def savepkl(filename, record, versioning=False):
    pklopen = gzip.open if filename.endswith('.pklz') else open
    with SoftFileLock('%s.lock' % filename):
        with pklopen(filename, 'wb') as pkl_file:
            if versioning:
                from nipype import __version__ as version
                metadata = json.dumps({'version': version})

                pkl_file.write(metadata.encode('utf-8'))
                pkl_file.write('\n'.encode('utf-8'))

            pickle.dump(record, pkl_file)
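With `versioning=True`, `savepkl` prepends a single JSON line ({"version": ...}) to the pickled payload; that is the metadata line `loadpkl` in Example #21 detects and strips before unpickling. A round trip (filename illustrative):

savepkl('result.pklz', {'a': 1}, versioning=True)  # gzipped because of the .pklz suffix
record = loadpkl('result.pklz')                    # version line is parsed, then skipped
assert record == {'a': 1}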
Example #23
def update_archival(outpath):
    # Add information to the "package slip" metadata.

    infofile = os.path.join(outpath, "package_slip.json")
    infofile_lock = os.path.join(outpath, "package_slip.json.lock")
    lock_timeout = 5
    lock = SoftFileLock(infofile_lock)

    try:
        with lock.acquire(timeout=lock_timeout):
            if os.path.isfile(infofile):
                # file exists, needs update
                status_note(['Going to update ', infofile], d=is_debug)
                with open(infofile, encoding='utf-8') as data_file:
                    data = json.load(data_file)
                    standard = archival_info['standards_used'][0]
                    if standard not in data['standards_used']:
                        data['standards_used'].append(standard)
            else:
                # file does not exist, needs to be created
                data = archival_info
            
            # in any case: write current data back to info file
            status_note(['Write info to ', infofile], d=is_debug)
            with open(infofile, 'w', encoding='utf-8') as outfile:
                # for json:
                output_data = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))
                outfile.write(str(output_data))
                status_note([str(os.stat(infofile).st_size), ' bytes written to ', os.path.abspath(infofile)], d=is_debug)
    except Timeout:
        status_note(['Cannot acquire lock within ', lock_timeout, ' seconds, raising exception!'], d=is_debug)
        raise
    except Exception as exc:
        status_note(str(exc), d=is_debug)
        raise
    finally:
        lock.release()
Example #24
def spool_lock(spoolpath):
    lock = SoftFileLock(join(spoolpath, 'lock'))
    lock.acquire()
    try:
        yield lock
    finally:
        lock.release()
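Since `spool_lock` yields, it is presumably meant to be wrapped with `contextlib.contextmanager`; the decorator does not appear in the excerpt, so treating it that way is an assumption:

from contextlib import contextmanager

@contextmanager
def spool_lock(spoolpath):
    lock = SoftFileLock(join(spoolpath, 'lock'))
    lock.acquire()
    try:
        yield lock
    finally:
        lock.release()

# The spool directory then stays locked for the duration of the block:
with spool_lock('/var/spool/myapp'):   # path illustrative
    process_spool()                    # hypothetical workload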
Example #25
    def renumber_dsn(self, wdmpath, odsn, ndsn):
        """Will renumber the odsn to the ndsn."""
        odsn = int(odsn)
        ndsn = int(ndsn)

        lock = SoftFileLock(wdmpath + ".lock", timeout=30)
        with lock:
            wdmfp = self._open(wdmpath, 51)
            retcode = self.wddsrn(wdmfp, odsn, ndsn)
            self._close(wdmpath)
        self._retcode_check(retcode,
                            additional_info="wddsrn file={0} DSN={1}".format(
                                wdmpath, odsn))
Example #26
    def locked_call(callable: Callable[[], Any], lock_file: str, lock_timeout: int) -> Any:
        """Locks a callable execution with a given timout and a specified lock file.

        Parameters
        ----------
        callable : Callable[[], Any]
            Whatever should be executed thread safe in a multi host environment.

        Returns
        -------
        Any
            The result of the callable


        Raises
        ------
        Timeout
            If the locking times out.
        """
        lock = SoftFileLock(lock_file, timeout=lock_timeout)
        with lock.acquire(timeout=lock_timeout):
            return callable()
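Because `locked_call` takes a zero-argument callable, call sites typically wrap the real work in a lambda or `functools.partial` (the path and function below are illustrative):

result = locked_call(lambda: append_entry(registry_path, entry),
                     lock_file=registry_path + '.lock',
                     lock_timeout=30)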
Example #27
    def _optimistic_lock():
        """Return a context manager for acquiring ``_lock_filename()``.

        If the file is locked, we raise an exception rather than
        waiting.

        For example::

            with BatteryController._optimistic_lock():
                status = BatteryController._read_state()
                # --- Mutate status here ---
                BatteryController._poll(status)
        """
        return SoftFileLock(BatteryController._lock_filename())
Example #28
    async def _run(self, submitter=None, **kwargs):
        # self.inputs = dc.replace(self.inputs, **kwargs) don't need it?
        checksum = self.checksum
        lockfile = self.cache_dir / (checksum + ".lock")
        # Eagerly retrieve cached
        result = self.result()
        if result is not None:
            return result
        # creating connections that were defined after adding tasks to the wf
        for task in self.graph.nodes:
            self.create_connections(task)
        """
        Concurrent execution scenarios

        1. prior cache exists -> return result
        2. other process running -> wait
           a. finishes (with or without exception) -> return result
           b. gets killed -> restart
        3. no cache or other process -> start
        4. two or more concurrent new processes get to start
        """
        # TODO add signal handler for processes killed after lock acquisition
        self.hooks.pre_run(self)
        with SoftFileLock(lockfile):
            # Let only one equivalent process run
            odir = self.output_dir
            if not self.can_resume and odir.exists():
                shutil.rmtree(odir)
            cwd = os.getcwd()
            odir.mkdir(parents=False, exist_ok=self.can_resume)
            self.audit.start_audit(odir=odir)
            result = Result(output=None, runtime=None, errored=False)
            self.hooks.pre_run_task(self)
            try:
                self.audit.monitor()
                await self._run_task(submitter)
                result.output = self._collect_outputs()
            except Exception as e:
                record_error(self.output_dir, e)
                result.errored = True
                raise
            finally:
                self.hooks.post_run_task(self, result)
                self.audit.finalize_audit(result=result)
                save(odir, result=result, task=self)
                os.chdir(cwd)
        self.hooks.post_run(self, result)
        return result
Example #29
 def _run(self, rerun=False, **kwargs):
     self.inputs = attr.evolve(self.inputs, **kwargs)
     self.inputs.check_fields_input_spec()
     checksum = self.checksum
     lockfile = self.cache_dir / (checksum + ".lock")
     # Eagerly retrieve cached - see scenarios in __init__()
     self.hooks.pre_run(self)
     # TODO add signal handler for processes killed after lock acquisition
     with SoftFileLock(lockfile):
         if not (rerun or self.task_rerun):
             result = self.result()
             if result is not None:
                 return result
         # Let only one equivalent process run
         odir = self.output_dir
         if not self.can_resume and odir.exists():
             shutil.rmtree(odir)
         cwd = os.getcwd()
          odir.mkdir(parents=False, exist_ok=self.can_resume)
         orig_inputs = attr.asdict(self.inputs)
         map_copyfiles = copyfile_input(self.inputs, self.output_dir)
         modified_inputs = template_update(self.inputs,
                                           self.output_dir,
                                           map_copyfiles=map_copyfiles)
         if modified_inputs:
             self.inputs = attr.evolve(self.inputs, **modified_inputs)
         self.audit.start_audit(odir)
         result = Result(output=None, runtime=None, errored=False)
         self.hooks.pre_run_task(self)
         try:
             self.audit.monitor()
             self._run_task()
             result.output = self._collect_outputs()
         except Exception as e:
             record_error(self.output_dir, e)
             result.errored = True
             raise
         finally:
             self.hooks.post_run_task(self, result)
             self.audit.finalize_audit(result)
             save(odir, result=result, task=self)
              # private "_" fields (function etc.) shouldn't change anyway, so drop them before restoring
             orig_inputs = dict((k, v) for (k, v) in orig_inputs.items()
                                if not k.startswith("_"))
             self.inputs = attr.evolve(self.inputs, **orig_inputs)
             os.chdir(cwd)
     self.hooks.post_run(self, result)
     return result
Example #30
def _create_lock(path):
    """Create lock based on file system capabilities

    Determine if we can rely on the fcntl module for locking files.
    Otherwise, fallback on using the directory creation atomicity as a locking mechanism.
    """
    file_system = _get_fs(path)

    if _fs_support_globalflock(file_system):
        log.debug("Using flock.")
        return FileLock(path)
    else:
        log.debug(
            "Cluster does not support flock. Falling back to SoftFileLock.")
        return SoftFileLock(path)
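`FileLock` relies on OS-level advisory locks (fcntl on Unix), which are not reliably coherent across nodes on network filesystems, while `SoftFileLock` only creates and deletes a marker file. A predicate in the spirit of `_fs_support_globalflock` might look like this; the filesystem list is an assumption, not from the source:

# Filesystems where flock(2) is commonly not coherent across nodes.
_NO_GLOBAL_FLOCK = {'nfs', 'nfs4', 'lustre', 'cifs'}

def _fs_support_globalflock(file_system):
    return file_system.lower() not in _NO_GLOBAL_FLOCK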
Example #31
    def lock(self, key):
        lockdir = join(self.base, 'locks')
        lockfname = join(lockdir, self.hash(key) + '.lock')

        self.pdebug('LOCK - ' + key + '(' + lockfname + ')')

        if not exists(lockdir):
            try:
                makedirs(lockdir)
            except OSError:
                # Another process may have created the directory concurrently.
                pass

        print(f'locking {key}', flush=True)

        return SoftFileLock(lockfname)
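The returned per-key lock is used by the caller as a context manager; a call site might look like this (the `cache` instance and its methods are hypothetical):

with cache.lock(key):
    value = expensive_compute()  # work guarded per key
    cache.store(key, value)      # hypothetical write method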