import os
import tempfile
import warnings

import fasteners
import pytest

import nifgen


# Set up some global information we need
test_files_base_dir = os.path.dirname(__file__)

# We need a lock file so multiple tests aren't hitting the db at the same time
# Trying to create simulated DAQmx devices at the same time (which can happen
# when running tox with --parallel N, or when two different drivers are being
# tested at the same time on the same machine) can result in an internal error:
# -2147220733: MAX:  (Hex 0x80040303) Internal error: The requested object was not found in
# the configuration database. Please note the steps you performed that led to this error and
# contact technical support at http://ni.com/support.
# This is filed as internal bug 255545
daqmx_sim_db_lock_file = os.path.join(tempfile.gettempdir(), 'daqmx_db.lock')
daqmx_sim_db_lock = fasteners.InterProcessLock(daqmx_sim_db_lock_file)


def get_test_file_path(file_name):
    return os.path.join(test_files_base_dir, file_name)


@pytest.fixture(scope='function')
def session():
    with nifgen.Session('', '0', False, 'Simulate=1, DriverSetup=Model:5433 (2CH);BoardType:PXIe') as simulated_session:
        yield simulated_session


@pytest.fixture(scope='function')
def session_5421():
    with daqmx_sim_db_lock:
        # Assumed continuation (the example is cut off in the source): create
        # the simulated 5421 session while holding the MAX database lock,
        # mirroring the fixture above.
        with nifgen.Session('', '0', False, 'Simulate=1, DriverSetup=Model:5421;BoardType:PXI') as simulated_session:
            yield simulated_session
Example #2
 # The decorator was presumably lost when this method was excerpted; the
 # generator only works as a `with` target under contextlib.contextmanager.
 @contextlib.contextmanager
 def _path_lock(self, path):
     lockfile = self._join_path(path, 'lock')
     with fasteners.InterProcessLock(lockfile) as lock:
         with _storagefailure_wrapper():
             yield lock
Example #3
def load_cython_ext(mujoco_path):
    """
    Loads the cymj Cython extension. This is safe to be called from
    multiple processes running on the same machine.

    Cython only gives us back the raw path, regardless of whether
    it found a cached version or actually compiled. Since we do
    non-idempotent postprocessing of the DLL, be extra careful
    to only do that once and then atomically move to the final
    location.
    """
    if ('glfw' in sys.modules
            and 'mujoco' in abspath(sys.modules["glfw"].__file__)):
        print('''
WARNING: Existing glfw python module detected!

MuJoCo comes with its own version of GLFW, so it's preferable to use that one.

The easy solution is to `import mujoco_py` _before_ `import glfw`.
''')

    lib_path = os.path.join(mujoco_path, "bin")
    if sys.platform == 'darwin':
        Builder = MacExtensionBuilder
    elif sys.platform == 'linux':
        _ensure_set_env_var("LD_LIBRARY_PATH", lib_path)
        if os.getenv('MUJOCO_PY_FORCE_CPU') is None and get_nvidia_lib_dir() is not None:
            _ensure_set_env_var("LD_LIBRARY_PATH", get_nvidia_lib_dir())
            Builder = LinuxGPUExtensionBuilder
        else:
            Builder = LinuxCPUExtensionBuilder
    elif sys.platform.startswith("win"):
        var = "PATH"
        if var not in os.environ or lib_path not in os.environ[var].split(";"):
            raise Exception("Please add mujoco library to your PATH:\n"
                            "set %s=%s;%%%s%%" % (var, lib_path, var))
        Builder = WindowsExtensionBuilder
    else:
        raise RuntimeError("Unsupported platform %s" % sys.platform)

    builder = Builder(mujoco_path)
    cext_so_path = builder.get_so_file_path()

    lockpath = os.path.join(os.path.dirname(cext_so_path),
                            'mujocopy-buildlock')

    with fasteners.InterProcessLock(lockpath):
        mod = None
        force_rebuild = os.environ.get('MUJOCO_PY_FORCE_REBUILD')
        if force_rebuild:
            # Try to remove the old file, ignore errors if it doesn't exist
            print("Removing old mujoco_py cext", cext_so_path)
            try:
                os.remove(cext_so_path)
            except OSError:
                pass
        if exists(cext_so_path):
            try:
                mod = load_dynamic_ext('cymj', cext_so_path)
            except ImportError:
                print("Import error. Trying to rebuild mujoco_py.")
        if mod is None:
            cext_so_path = builder.build()
            mod = load_dynamic_ext('cymj', cext_so_path)

    return mod
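The MUJOCO_PY_FORCE_REBUILD branch above can be driven from the caller. A
minimal usage sketch; the env-var assignment and the install path are
illustrative assumptions, not part of the original example:

import os

# Force the cymj extension to be rebuilt on the next load; this is the
# env var that load_cython_ext checks above.
os.environ['MUJOCO_PY_FORCE_REBUILD'] = '1'
cymj = load_cython_ext(os.path.expanduser('~/.mujoco/mujoco210'))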
Example #4
def require(env, dataset: Union[str, Dataset]) -> bool:
    """Require that the given dataset is available to the environment.

    This will download and activate the dataset if it is not already installed.
    After calling this function, benchmarks from the dataset will be available
    to use.

    Example usage:

        >>> env = gym.make("llvm-v0")
        >>> require(env, "blas-v0")
        >>> env.reset(benchmark="blas-v0/1")

    :param env: The environment that this dataset is required for.
    :param dataset: The name of the dataset to download, the URL of the dataset,
        or a :class:`Dataset` instance.
    :return: :code:`True` if the dataset was downloaded, or :code:`False` if the
        dataset was already available.
    """

    def download_and_unpack_archive(url: str, sha256: Optional[str] = None) -> Dataset:
        json_files_before = {
            f
            for f in env.inactive_datasets_site_path.iterdir()
            if f.is_file() and f.name.endswith(".json")
        }
        tar_data = io.BytesIO(download(url, sha256))
        with tarfile.open(fileobj=tar_data, mode="r:bz2") as arc:
            arc.extractall(str(env.inactive_datasets_site_path))
        json_files_after = {
            f
            for f in env.inactive_datasets_site_path.iterdir()
            if f.is_file() and f.name.endswith(".json")
        }
        new_json = json_files_after - json_files_before
        if not len(new_json):
            raise OSError(f"Downloaded dataset {url} contains no metadata JSON file")
        return Dataset.from_json_file(list(new_json)[0])

    def unpack_local_archive(path: Path) -> Dataset:
        if not path.is_file():
            raise FileNotFoundError(f"File not found: {path}")
        json_files_before = {
            f
            for f in env.inactive_datasets_site_path.iterdir()
            if f.is_file() and f.name.endswith(".json")
        }
        with tarfile.open(str(path), "r:bz2") as arc:
            arc.extractall(str(env.inactive_datasets_site_path))
        json_files_after = {
            f
            for f in env.inactive_datasets_site_path.iterdir()
            if f.is_file() and f.name.endswith(".json")
        }
        new_json = json_files_after - json_files_before
        if not len(new_json):
            raise OSError(f"Downloaded dataset {url} contains no metadata JSON file")
        return Dataset.from_json_file(list(new_json)[0])

    with fasteners.InterProcessLock(env.datasets_site_path / "LOCK"):
        # Resolve the name and URL of the dataset.
        sha256 = None
        if isinstance(dataset, Dataset):
            name, url = dataset.name, dataset.url
        elif isinstance(dataset, str):
            # Distinguish a dataset URL from a dataset name.
            if "://" in dataset:
                name, url = None, dataset
            else:
                try:
                    dataset = env.available_datasets[dataset]
                except KeyError:
                    raise ValueError(f"Dataset not found: {dataset}")
                name, url, sha256 = dataset.name, dataset.url, dataset.sha256
        else:
            raise TypeError(
                f"require() called with unsupported type: {type(dataset).__name__}"
            )

        # Check if we have already downloaded the dataset.
        if name:
            if (env.datasets_site_path / name).is_dir():
                # Dataset is already downloaded and active.
                return False
            elif not (env.inactive_datasets_site_path / name).is_dir():
                # Dataset is not downloaded yet; fetch and unpack it.
                name = download_and_unpack_archive(url, sha256=sha256).name
        elif url.startswith("file:///"):
            name = unpack_local_archive(Path(url[len("file:///") :])).name
        else:
            name = download_and_unpack_archive(url, sha256=sha256).name

        activate(env, name)
        return True
Example #5
 def read_file(self, path, lock_file):
     with fasteners.InterProcessLock(lock_file) if lock_file else no_op():
         with open(path) as f:
             return f.read()
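The no_op fallback used here is not shown in the excerpt. A minimal sketch of
what such a null context manager presumably looks like (on Python 3.7+,
contextlib.nullcontext() serves the same purpose):

import contextlib


@contextlib.contextmanager
def no_op():
    # Null context manager: lets `InterProcessLock(lock_file) if lock_file
    # else no_op()` fall through when no lock file is configured.
    yield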
Example #6
File: svn.py Project: iStonesy/WebKit
 def _cache_lock(self):
     return fasteners.InterProcessLock(
         os.path.join(os.path.dirname(self._cache_path), 'cache.lock'))
Example #7
 def _setup_lock(self, sync_path):
     path = os.path.join(sync_path, "cache.lock")
     self._lock = fasteners.InterProcessLock(path)
Example #8
def start_app(fnames=[],
              name=[],
              dims=None,
              plot_method=None,
              output=None,
              project=None,
              engine=None,
              formatoptions=None,
              tight=False,
              encoding=None,
              enable_post=False,
              seaborn_style=None,
              output_project=None,
              concat_dim=get_default_value(xr.open_mfdataset, 'concat_dim'),
              chname={},
              backend=False,
              new_instance=False,
              rc_file=None,
              rc_gui_file=None,
              include_plugins=rcParams['plugins.include'],
              exclude_plugins=rcParams['plugins.exclude'],
              offline=False,
              pwd=None,
              script=None,
              command=None,
              exec_=True,
              use_all=False,
              callback=None):
    """
    Start the QApplication, or only make a plot if `output` is set

    Parameters
    ----------
    %(make_plot.parameters)s
    backend: None or str
        The backend to use. By default, the ``'gui.backend'`` key in the
        :attr:`~psyplot_gui.config.rcsetup.rcParams` dictionary is used.
        Otherwise it can be None to use the standard matplotlib backend or a
        string identifying the backend
    new_instance: bool
        If True/set and the `output` parameter is not set, a new application is
        created
    rc_gui_file: str
        The path to a yaml configuration file that can be used to update the
        :attr:`~psyplot_gui.config.rcsetup.rcParams`
    include_plugins: list of str
        The plugin widget to include. Can be either None to load all that are
        not explicitly excluded by `exclude_plugins` or a list of
        plugins to include. List items can be either module names, plugin
        names or the module name and widget via ``'<module_name>:<widget>'``
    exclude_plugins: list of str
        The plugin widgets to exclude. Can be either ``'all'`` to exclude
        all plugins or a list like in `include_plugins`.
    offline: bool
        If True/set, psyplot will be started in offline mode without
        intersphinx and remote access for the help explorer
    pwd: str
        The path to the working directory to use. Note if you do not provide
        any `fnames` or `project`, but set the `pwd`, it will switch the
        `pwd` of the current GUI.
    script: str
        The path to a python script that shall be run in the GUI. If the GUI
        is already running, the commands will be executed in this GUI.
    command: str
        Python commands that shall be run in the GUI. If the GUI is already
        running, the commands will be executed in this GUI
    use_all: bool
        If True, use all variables. Note that this is the default if the
        `output` is specified and not `name`
    exec_: bool
        If True, the main loop is entered.
    callback: str
        A unique identifier for the method that should be used if psyplot is
        already running. Set this parameter to None to avoid sending anything
        to an already running instance.

    Returns
    -------
    None or :class:`psyplot_gui.main.MainWindow`
        ``None`` if `exec_` is True, otherwise the created
        :class:`~psyplot_gui.main.MainWindow` instance
    """
    if pwd is not None:
        os.chdir(pwd)
    if script is not None:
        script = osp.abspath(script)

    if project is not None and (name != [] or dims is not None):
        warn('The `name` and `dims` parameters are ignored if the `project`'
             ' parameter is set!')

    # load rcParams from file
    if rc_gui_file is not None:
        rcParams.load_from_file(rc_gui_file)

    # set plugins
    rcParams['plugins.include'] = include_plugins
    rcParams['plugins.exclude'] = exclude_plugins

    if offline:
        rcParams['help_explorer.online'] = False
        rcParams['help_explorer.use_intersphinx'] = False

    if dims is not None and not isinstance(dims, dict):
        dims = dict(chain(*map(six.iteritems, dims)))

    if output is not None:
        return make_plot(fnames=fnames,
                         name=name,
                         dims=dims,
                         plot_method=plot_method,
                         output=output,
                         project=project,
                         engine=engine,
                         formatoptions=formatoptions,
                         tight=tight,
                         rc_file=rc_file,
                         encoding=encoding,
                         enable_post=enable_post,
                         seaborn_style=seaborn_style,
                         output_project=output_project,
                         concat_dim=concat_dim,
                         chname=chname)
    if use_all:
        name = 'all'
    else:
        name = safe_list(name)

    # Lock file creation
    if not new_instance:
        lock_file = osp.join(get_configdir(), 'psyplot.lock')
        lock = fasteners.InterProcessLock(lock_file)

        # Try to lock psyplot.lock. If it's *possible* to do it, then
        # there is no previous instance running and we can start a
        # new one. If *not*, then there is an instance already
        # running, which is locking that file
        lock_created = lock.acquire(False)
    else:
        lock_created = False

    chname = dict(chname)

    if lock_created:
        # Start a new instance
        atexit.register(lock.release)
    elif not new_instance:
        if callback is None:
            if fnames or project:
                callback = 'new_plot'
            elif pwd is not None:
                callback = 'change_cwd'
                fnames = [pwd]
            elif script is not None:
                callback = 'run_script'
                fnames = [script]
            elif command is not None:
                callback = 'command'
                engine = command
        if callback:
            send_files_to_psyplot(callback, fnames, project, engine,
                                  plot_method, name, dims, encoding,
                                  enable_post, seaborn_style, concat_dim,
                                  chname)
        return
    elif new_instance:
        rcParams['main.listen_to_port'] = False
    if backend is not False:
        rcParams['backend'] = backend
    from psyplot_gui.main import MainWindow
    fnames = _get_abs_names(fnames)
    if project is not None:
        project = _get_abs_names([project])[0]
    if exec_:
        from psyplot_gui.compat.qtcompat import QApplication
        app = QApplication(sys.argv)
    if isinstance(new_instance, MainWindow):
        mainwindow = new_instance
    else:
        mainwindow = MainWindow.run(fnames, project, engine, plot_method, name,
                                    dims, encoding, enable_post, seaborn_style,
                                    concat_dim, chname)
    if script is not None:
        mainwindow.console.run_script_in_shell(script)
    if command is not None:
        mainwindow.console.run_command_in_shell(command)
    if exec_:
        sys.excepthook = mainwindow.excepthook
        sys.exit(app.exec_())
    else:
        return mainwindow
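Stripped of the psyplot specifics, the single-instance logic above reduces to
a short pattern. A minimal sketch; run_gui and forward_to_running_instance
are hypothetical stand-ins for starting the app and for the
send_files_to_psyplot-style hand-off:

import atexit

import fasteners

lock = fasteners.InterProcessLock('/tmp/myapp.lock')  # illustrative path
if lock.acquire(blocking=False):
    # Nobody else holds the lock: we are the first instance.
    atexit.register(lock.release)
    run_gui()                          # hypothetical entry point
else:
    # Another instance is already running; hand our arguments over to it.
    forward_to_running_instance()      # hypothetical IPC call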
Example #9
                        required=False)

    args = parser.parse_args()

    if os.geteuid() != 0:
        error("{} must be run as root.".format(sys.argv[0]))

    if args.network is not None:
        # test if this looks valid
        if not is_cidr(args.network):
            error("--network {} is not a valid IPv4 CIDR range.".format(
                args.network))

    # lock process
    lockfile = args.config + '.lock'
    lock = fasteners.InterProcessLock(lockfile)
    if not lock.acquire(blocking=False):
        error("Lockfile '{}' exists.".format(lockfile))

    # read config file
    try:
        with open(args.config) as f:
            configstr = f.read()
    except IOError:
        configstr = ""

    while True:
        # parse yaml config
        config = yaml.load(configstr, Loader=Loader)
        if config is None:
            config = {}
Example #10
def _mkdir_threadsafe_unique(log_dir_root, log_date, postfix_dir_name):
	os.makedirs(log_dir_root, exist_ok=True)
	# Make sure only one process at a time writes into log_dir_root
	with fasteners.InterProcessLock(os.path.join(log_dir_root, 'lock')):
		return _mkdir_unique(log_dir_root, log_date, postfix_dir_name)
Example #11
def store_gz(obj, file_output_path):
    os.makedirs(os.path.dirname(file_output_path), exist_ok=True)
    with fasteners.InterProcessLock(file_output_path):
        with gzip.open(file_output_path, 'wt') as f:
            f.write(obj)
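Note that store_gz locks file_output_path itself and then rewrites that same
file; as the comment in Example #28 below warns, writing to the locked file
can break the lock. A sketch of a safer variant that locks a sidecar file
instead (the '.lock' suffix is an assumption, mirroring the other examples on
this page):

import gzip
import os

import fasteners


def store_gz_safe(obj, file_output_path):
    os.makedirs(os.path.dirname(file_output_path), exist_ok=True)
    # Lock a sidecar file rather than the file being rewritten, so that
    # truncating the output cannot invalidate the lock itself.
    with fasteners.InterProcessLock(file_output_path + '.lock'):
        with gzip.open(file_output_path, 'wt') as f:
            f.write(obj)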
Example #12
import os
import subprocess

import fasteners

MODE_FILE = "/tmp/leftwm_modes.txt"
LOCK_FILE = MODE_FILE + ".lock"

TAG_COUNT = 20
WORKSPACE_COUNT = 3

tag_workspace = subprocess.check_output(["leftwm-state", "-q", "-t", ".config/leftwm/focused_tag.liquid"]).decode("utf-8").split(",")

tag = int(tag_workspace[0])
workspace = int(tag_workspace[1])

lock = fasteners.InterProcessLock(LOCK_FILE)

with lock:
    if not os.path.exists(MODE_FILE):
        with open(MODE_FILE, "x") as f:
            for _ in range(TAG_COUNT * WORKSPACE_COUNT):
                f.write("1\n")

    with open(MODE_FILE, "r") as f:
        data = f.readlines()

    if data[tag + TAG_COUNT*workspace] == "1\n":
        subprocess.run('leftwm-command "SetMarginMultiplier 0"', shell=True)
        data[tag + TAG_COUNT*workspace] = "0\n"
    elif data[tag + TAG_COUNT*workspace] == "0\n":
        subprocess.run('leftwm-command "SetMarginMultiplier 1"', shell=True)
        data[tag + TAG_COUNT*workspace] = "1\n"

    # Assumed continuation (the example is cut off in the source): write the
    # toggled modes back so the next invocation sees them.
    with open(MODE_FILE, "w") as f:
        f.writelines(data)
Example #13
 def __init__(self, path, barrier, member_id):
     super(FileLock, self).__init__(path)
     self.acquired = False
     self._lock = fasteners.InterProcessLock(path)
     self._barrier = barrier
     self._member_id = member_id
Example #14
# Imports implied by the excerpt (WriteDictToXl is a project-local helper
# that is not shown here):
import re
from collections import OrderedDict

import fasteners
from ansible.module_utils.basic import AnsibleModule


def main():
  fields = {
    "poe": {"required": True, "type": "dict" },
    "cdp": {"required": True, "type": "dict" },
    "dest": {"required": True, "type": "str" },
    "hostname": {"required": True, "type": "str" }
  }
  
  module = AnsibleModule(argument_spec=fields)
  poeList = []
  cdpList = []
  poeList = module.params['poe']['response']
  cdpList = module.params['cdp']['response']
  
  #merging the two tables into preferred output
  merged = []

  #Matching poeList interfaces with cdpList interfaces
  if poeList:
    for poeInterface in poeList:
      found = False
      if cdpList:
        for cdpNeighbor in cdpList:
          poe = re.search(r'([a-zA-Z]+)(\d\S*)', poeInterface['interface'])
          poeName = poe.group(1)
          poeInt = poe.group(2)
          
          cdp = cdpNeighbor['local_interface'].split(" ")
          cdpName = cdp[0]
          cdpInt = cdp[1]
          
          if (cdpName in poeName) or (poeName in cdpName):
            if cdpInt == poeInt:
              found = True
              merged.append(OrderedDict([
                ("Interface", poeInterface['interface']),
                ("POE Status", poeInterface['operation']),
                ("AP", cdpNeighbor['neighbor'])
              ]))
              cdpList[:] = [d for d in cdpList if d.get('local_interface') != cdpNeighbor['local_interface']] #remove current cdp that has a match from cdp list
              break
      if not found: #add poe that has no match
        merged.append(OrderedDict([
            ("Interface", poeInterface['interface']),
            ("POE Status", poeInterface['operation']),
            ("AP", "")
        ]))
        
  for cdpNeighbor in cdpList: #add cdp that has no match
    merged.append(OrderedDict([
      ("Interface", cdpNeighbor['local_interface']),
      ("POE Status", 'N/A'),
      ("AP", cdpNeighbor['neighbor'])
    ]))

  try:
    with fasteners.InterProcessLock('/tmp/ansible_lock_file'):
      WriteDictToXl(module.params['dest'], module.params['hostname'], merged)
  except IOError as e:
    print("I/O error({0}): {1}".format(e.errno, e.strerror))
    module.fail_json(msg="I/O error({0}): {1}".format(e.errno, e.strerror))
Example #15
File: target.py Project: mr-c/SOS
 def write(self):
     '''Write signature file with signature of script, input, output and dependent files.'''
     if isinstance(self.output_files, Undetermined) or isinstance(
             self.dependent_files, Undetermined):
         env.logger.trace(
             'Write signature failed due to undetermined files')
         return False
     env.logger.trace('Write signature {}'.format(self.proc_info))
     with open(self.proc_info, 'w') as md5:
         md5.write('{}\n'.format(textMD5(self.script)))
         md5.write('# input\n')
         for f in self.input_files:
             if f.exists('target'):
                 # this calculates file MD5
                 f.write_sig()
                 md5.write('{}\t{}\n'.format(f, f.md5()))
             elif f.exists('signature'):
                 md5.write('{}\t{}\n'.format(f, f.md5()))
             else:
                 return False
         md5.write('# output\n')
         for f in self.output_files:
             if f.exists('target'):
                 # this calculates file MD5
                 f.write_sig()
                 md5.write('{}\t{}\n'.format(f, f.md5()))
             elif f.exists('signature'):
                 md5.write('{}\t{}\n'.format(f, f.md5()))
             else:
                 return False
         md5.write('# dependent\n')
         for f in self.dependent_files:
             if f.exists('target'):
                 # this calculates file MD5
                 f.write_sig()
                 md5.write('{}\t{}\n'.format(f, f.md5()))
             elif f.exists('signature'):
                 md5.write('{}\t{}\n'.format(f, f.md5()))
             else:
                 return False
         md5.write('# context\n')
         for var in sorted(self.signature_vars):
             # var can be local and not passed as outside environment
             if var in env.sos_dict:
                 value = env.sos_dict[var]
                 if isinstance(value, (str, bool, int, float, complex,
                                       bytes, list, tuple, set, dict)):
                     md5.write('{} = {!r}\n'.format(var, value))
                 else:
                     env.logger.debug(
                         'Variable {} of value {} is ignored from step signature'
                         .format(var, value))
         md5.write('# step process\n')
         md5.write(self.script)
     # successfully write signature, write in workflow runtime info
     workflow_sig = env.sos_dict['__workflow_sig__']
     with fasteners.InterProcessLock(workflow_sig + '_'):
         with open(workflow_sig, 'a') as wf:
             wf.write('EXE_SIG\tstep={}\tsession={}\n'.format(
                 self.step_md5,
                 os.path.basename(self.proc_info).split('.')[0]))
             for f in self.input_files:
                 if isinstance(f, FileTarget):
                     wf.write(
                         'IN_FILE\tfilename={}\tsession={}\tsize={}\tmd5={}\n'
                         .format(f, self.step_md5, f.size(), f.md5()))
             for f in self.dependent_files:
                 if isinstance(f, FileTarget):
                     wf.write(
                         'IN_FILE\tfilename={}\tsession={}\tsize={}\tmd5={}\n'
                         .format(f, self.step_md5, f.size(), f.md5()))
             for f in self.output_files:
                 if isinstance(f, FileTarget):
                     wf.write(
                         'OUT_FILE\tfilename={}\tsession={}\tsize={}\tmd5={}\n'
                         .format(f, self.step_md5, f.size(), f.md5()))
     return True
Example #16
 def __init__(self, lockfile):
     self._lockfile = lockfile
     self.tlock = threading.Lock()
     # Note: the inter-process lock uses a fixed path here rather than
     # self._lockfile, so every instance shares the same file lock.
     self.flock = fasteners.InterProcessLock("/tmp/tmp_lock_file")
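The two locks serve different layers: fasteners' inter-process locks make no
guarantees about concurrent use by threads inside one process, so a
threading.Lock is paired with the file lock (Example #19 below composes the
same two layers). A minimal sketch of how such a pair is typically used:

import threading

import fasteners

_tlock = threading.Lock()
_flock = fasteners.InterProcessLock('/tmp/tmp_lock_file')


def critical_section():
    # Exclude other threads in this process first, then other processes
    # on this machine; the locks release in reverse order on exit.
    with _tlock:
        with _flock:
            pass  # ... protected work goes here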
Example #17
File: target.py Project: mr-c/SOS
 def validate(self):
     '''Check if ofiles and ifiles match signatures recorded in md5file'''
     if not self.proc_info or not os.path.isfile(self.proc_info):
         env.logger.trace('Fail because of no signature file {}'.format(
             self.proc_info))
         return False
     env.logger.trace('Validating {}'.format(self.proc_info))
     #
     # file not exist?
     if isinstance(self.output_files, Undetermined):
         env.logger.trace('Fail because of undetermined output files.')
         return False
     sig_files = self.input_files + self.output_files + self.dependent_files
     for x in sig_files:
         if not x.exists('any'):
             env.logger.trace('Missing target {}'.format(x))
             return False
     #
     if '__hard_target__' in env.sos_dict:
         for x in self.output_files:
             if not x.exists('target'):
                 env.logger.trace('Missing real target {}'.format(x))
                 return False
     #
     files_checked = {
         x.fullname(): False
         for x in sig_files if not isinstance(x, Undetermined)
     }
     res = {'input': [], 'output': [], 'depends': [], 'vars': {}}
     cur_type = 'input'
     with open(self.proc_info) as md5:
         cmdMD5 = md5.readline().strip()  # command
         if textMD5(self.script) != cmdMD5:
             env.logger.trace('Fail because of command change')
             return False
         for line in md5:
             if not line.strip():
                 continue
             if line.startswith('#'):
                 if line == '# input\n':
                     cur_type = 'input'
                 elif line == '# output\n':
                     cur_type = 'output'
                 elif line == '# dependent\n':
                     cur_type = 'depends'
                 elif line == '# context\n':
                     cur_type = 'context'
                 elif line == '# step process\n':
                     break
                 else:
                     env.logger.trace(
                         'Unrecognized line in sig file {}'.format(line))
                 continue
             if cur_type == 'context':
                 key, value = line.split('=', 1)
                 try:
                     res['vars'][key.strip()] = eval(value.strip())
                 except Exception as e:
                     env.logger.warning(
                         'Variable {} with value {} cannot be restored from signature'
                         .format(key, value.strip()))
                 continue
             try:
                 f, m = line.rsplit('\t', 1)
                 if '(' in f and ')' in f:
                     freal = eval(f)
                 else:
                     freal = FileTarget(f)
                 if freal.exists('target'):
                     fmd5 = freal.calc_md5()
                 elif freal.exists('signature'):
                     env.logger.info(
                         'Validate with signature of non-existing target {}'
                         .format(freal))
                     fmd5 = freal.md5()
                 else:
                     env.logger.trace('File {} not exist'.format(f))
                     return False
                 res[cur_type].append(freal.fullname(
                 ) if isinstance(freal, FileTarget) else freal)
                 if fmd5 != m.strip():
                     env.logger.trace('MD5 mismatch {}: {} / {}'.format(
                         f, fmd5, m.strip()))
                     return False
                 files_checked[freal.fullname()] = True
             except Exception as e:
                 env.logger.trace('Wrong md5 line {} in {}: {}'.format(
                     line, self.proc_info, e))
                 continue
     #
     if not all(files_checked.values()):
         env.logger.trace('No MD5 signature for {}'.format(', '.join(
             x for x, y in files_checked.items() if not y)))
         return False
     env.logger.trace('Signature matches and returns {}'.format(res))
     # validation success, record signature used
     workflow_sig = env.sos_dict['__workflow_sig__']
     with fasteners.InterProcessLock(workflow_sig + '_'):
         with open(workflow_sig, 'a') as wf:
             wf.write(self.proc_info + '\n')
     return res
Example #18
 def set_ref(self, conan_reference, remote):
     with fasteners.InterProcessLock(self._filename + ".lock", logger=logger):
         conan_reference = str(conan_reference)
         remotes, refs = self._load()
         refs[conan_reference] = remote.name
         self._save(remotes, refs)
Example #19
def _lock(path):
    # lock with both a regular threading lock - for multithreaded access,
    # and fasteners lock for multiprocess access
    with PLUGIN_INSTALL_LOCK:
        with fasteners.InterProcessLock('{0}.lock'.format(path)):
            yield
Example #20
 def remotes(self):
     with fasteners.InterProcessLock(self._filename + ".lock", logger=logger):
         remotes, _ = self._load()
         return [Remote(ref, remote, verify_ssl) for ref, (remote, verify_ssl) in remotes.items()]
Example #21
 def _lock(path):
     return fasteners.InterProcessLock('{0}.lock'.format(path))
Example #22
 def refs(self):
     with fasteners.InterProcessLock(self._filename + ".lock", logger=logger):
         _, refs = self._load()
         return refs
Example #23
 def __init__(self, root: Optional[Union[Path, str]] = None):
     self.root = Path(root or os.getcwd()).resolve()
     self._meltano_ip_lock = fasteners.InterProcessLock(
         self.run_dir("meltano.yml.lock"))
Example #24
def storage_file_lock(storage_file_path):
    with fasteners.InterProcessLock('{0}.lock'.format(storage_file_path)):
        yield
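Generator helpers like this one (and _path_lock in Example #2 or _lock in
Example #19) only work as `with` targets once wrapped in
contextlib.contextmanager; the decorator was presumably dropped when the
snippets were excerpted. A sketch of the full pattern, with usage:

import contextlib

import fasteners


@contextlib.contextmanager
def storage_file_lock(storage_file_path):
    with fasteners.InterProcessLock('{0}.lock'.format(storage_file_path)):
        yield


# The with-body runs while the sidecar lock file is held.
with storage_file_lock('/tmp/storage.json'):
    pass  # ... read or write the storage file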
Example #25
 def write_file(self, path, contents, lock_file):
     with fasteners.InterProcessLock(lock_file) if lock_file else no_op():
         with open(path, "w") as f:
             f.write(contents)
Example #26
 def _meta_process():
     with fasteners.InterProcessLock(os.path.join(lock_dir,
                                                  meta_repo.name)):
         bump_meta(meta_repo, repo, version_number)
         git.checkout_master(meta_repo, pull=True)
Example #27
        with open(LED_REQ_FILE, 'w') as f:
            json.dump(d, f, indent=4)
        glock.release()

        #"http://localhost:8080/pet/api/check_img/" -d "num=2"


if __name__ == '__main__':
    #global gtag_color
    # Initialize parameters
    swchfg = ""
    preswchfg = ""
    swchpushtime = datetime.datetime.now()

    # Create the lock file
    glock = fasteners.InterProcessLock('/tmp/lockfile')

    # Load the tag-to-color mapping
    with open(TAG_COLOR_CONF) as f:
        gtag_color = json.load(f)
        pprint.pprint(gtag_color, width=40)

    if gtag_color.get("E20040057305011623802048") is not None:
        color = gtag_color.get("E20040057305011623802048")
        print("color", color)

    #sys.exit()

    # Initialize GPIO
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(SWITCH_IO, GPIO.IN)
Example #28
File: target.py Project: mr-c/SOS
    def __init__(self,
                 step_md5,
                 script,
                 input_files=[],
                 output_files=[],
                 dependent_files=[],
                 signature_vars=[]):
        '''Runtime information for specified output files

        output_files:
            intended output file

        '''
        self.step_md5 = step_md5
        self.script = script
        # input can only be a list of files
        if not isinstance(input_files, list):
            if input_files is None:
                self.input_files = []
            else:
                raise RuntimeError(
                    'Input files must be a list of filenames for runtime signature.'
                )
        else:
            self.input_files = [
                FileTarget(x) if isinstance(x, str) else x for x in input_files
            ]

        if dependent_files is None:
            self.dependent_files = []
        elif isinstance(dependent_files, list):
            self.dependent_files = [
                FileTarget(x) if isinstance(x, str) else x
                for x in dependent_files
            ]
        elif isinstance(dependent_files, Undetermined):
            self.dependent_files = dependent_files
        else:
            raise RuntimeError(
                'Dependent files must be a list of filenames or Undetermined for runtime signature.'
            )

        if isinstance(output_files, list):
            self.output_files = [
                FileTarget(x) if isinstance(x, str) else x
                for x in output_files
            ]
        elif isinstance(output_files, Undetermined):
            self.output_files = output_files
        else:
            raise RuntimeError(
                'Output files must be a list of filenames or Undetermined for runtime signature.'
            )

        self.signature_vars = signature_vars

        sig_name = textMD5('{} {} {} {}'.format(self.script, self.input_files,
                                                output_files,
                                                self.dependent_files))
        info_file = os.path.join('.sos', '.runtime', sig_name)
        if not isinstance(self.output_files,
                          Undetermined) and self.output_files:
            # Check whether the output path is outside of the current working
            # directory.
            rel_path = os.path.relpath(
                os.path.realpath(self.output_files[0].fullname()),
                env.exec_dir)
            # if this file is not relative to cache, use global signature file
            if rel_path.startswith('../'):
                info_file = os.path.join(os.path.expanduser('~'), '.sos',
                                         '.runtime', sig_name.lstrip(os.sep))
        # path to file
        self.proc_info = '{}.exe_info'.format(info_file)

        # we will need to lock on a file that we do not really write to
        # otherwise the lock will be broken when we write to it.
        self.lock = fasteners.InterProcessLock(self.proc_info + '_')
        if not self.lock.acquire(blocking=False):
            raise UnavailableLock((self.output_files, self.proc_info))
        else:
            env.logger.trace('Lock acquired for output files {}'.format(
                short_repr(self.output_files)))
Example #29
File: sync.py Project: tjcrone/zarr
 def __getitem__(self, item):
     path = os.path.join(self.path, item)
     lock = fasteners.InterProcessLock(path)
     return lock
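This __getitem__ is from zarr's ProcessSynchronizer, which maps an item name
to a file lock under its directory. A hedged usage sketch (zarr 2.x API; the
file names are illustrative):

import zarr

# Concurrent processes writing to the same chunk serialize on per-chunk
# lock files created under example.sync.
synchronizer = zarr.ProcessSynchronizer('example.sync')
z = zarr.open_array('example.zarr', mode='a', shape=(100,), chunks=(10,),
                    synchronizer=synchronizer)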
Example #30
def check_instance_pool(pool_id):
    from .models import Instance, InstancePool, INSTANCE_STATE, PoolStatusEntry, POOL_STATUS_ENTRY_TYPE

    lock = fasteners.InterProcessLock('/tmp/ec2spotmanager.pool%d.lck' %
                                      pool_id)

    if not lock.acquire(blocking=False):
        logger.warning('[Pool %d] Another check still in progress, exiting.',
                       pool_id)
        return

    try:
        instance_pool = InstancePool.objects.get(pk=pool_id)

        criticalPoolStatusEntries = PoolStatusEntry.objects.filter(
            pool=instance_pool, isCritical=True)

        if criticalPoolStatusEntries:
            return

        if instance_pool.config.isCyclic() or instance_pool.config.getMissingParameters():
            entry = PoolStatusEntry()
            entry.pool = instance_pool
            entry.isCritical = True
            entry.type = POOL_STATUS_ENTRY_TYPE['config-error']
            entry.msg = "Configuration error."
            entry.save()
            return

        config = instance_pool.config.flatten()

        instances_missing = config.size
        running_instances = []

        _update_pool_instances(instance_pool, config)

        instances = Instance.objects.filter(pool=instance_pool)

        for instance in instances:
            instance_status_code_fixed = False
            if instance.status_code >= 256:
                logger.warning(
                    "[Pool %d] Instance with EC2 ID %s has weird state code %d, attempting to fix...",
                    instance_pool.id, instance.ec2_instance_id,
                    instance.status_code)
                instance.status_code -= 256
                instance_status_code_fixed = True

            if instance.status_code in [
                    INSTANCE_STATE['running'], INSTANCE_STATE['pending'],
                    INSTANCE_STATE['requested']
            ]:
                instances_missing -= 1
                running_instances.append(instance)
            elif instance.status_code in [
                    INSTANCE_STATE['shutting-down'],
                    INSTANCE_STATE['terminated']
            ]:
                # The instance is no longer running, delete it from our database
                logger.info(
                    "[Pool %d] Deleting terminated instance with EC2 ID %s from our database.",
                    instance_pool.id, instance.ec2_instance_id)
                instance.delete()
            else:
                if instance_status_code_fixed:
                    # Restore original status code for error reporting
                    instance.status_code += 256

                logger.error(
                    "[Pool %d] Instance with EC2 ID %s has unexpected state code %d",
                    instance_pool.id, instance.ec2_instance_id,
                    instance.status_code)
                # In some cases, EC2 sends undocumented status codes and we don't know why
                # For now, reset the status code to 0, consider the instance still present
                # and hope that with the next update iteration, the problem will be gone.
                instance.status_code = 0
                instance.save()
                instances_missing -= 1
                running_instances.append(instance)

        # Continue working with the instances we have running
        instances = running_instances

        if not instance_pool.isEnabled:
            if running_instances:
                _terminate_pool_instances(instance_pool,
                                          running_instances,
                                          config,
                                          terminateByPool=True)

            return

        if ((not instance_pool.last_cycled)
                or instance_pool.last_cycled < timezone.now() -
                timezone.timedelta(seconds=config.cycle_interval)):
            logger.info(
                "[Pool %d] Needs to be cycled, terminating all instances...",
                instance_pool.id)
            instance_pool.last_cycled = timezone.now()
            _terminate_pool_instances(instance_pool,
                                      instances,
                                      config,
                                      terminateByPool=True)
            instance_pool.save()

            logger.info("[Pool %d] Termination complete.", instance_pool.id)

        if instances_missing > 0:
            logger.info("[Pool %d] Needs %s more instances, starting...",
                        instance_pool.id, instances_missing)
            _start_pool_instances(instance_pool,
                                  config,
                                  count=instances_missing)
        elif instances_missing < 0:
            # Select the oldest instances we have running and terminate
            # them so we meet the size limitation again.
            logger.info(
                "[Pool %d] Has %s instances over limit, terminating...",
                instance_pool.id, -instances_missing)
            instances = Instance.objects.filter(
                pool=instance_pool).order_by('created')[:-instances_missing]
            _terminate_pool_instances(instance_pool, instances, config)
        else:
            logger.debug("[Pool %d] Size is ok.", instance_pool.id)

    finally:
        lock.release()