def __init__(self, f=0):
    """Initialise the data store, ensuring ``data.json`` exists.

    Args:
        f: 0 (default) to use ``C:/DataStore`` (created if missing),
           otherwise a directory path string that must already exist.
    """
    if f == 0:
        # Reliable default path
        base = os.path.join('C:/', "DataStore")
        create_dir = True
    else:
        # Custom path supplied by the caller; not created automatically.
        base = f
        create_dir = False
    data_file = base + '/data.json'
    if os.path.isfile(data_file):
        self.file_path = data_file
        print("Success : File exists. File path is ", data_file)
    else:
        if create_dir:
            # NOTE(review): os.mkdir raises if the directory already exists
            # but data.json is missing — preserved from the original code.
            os.mkdir(base)
        # Lock while seeding the file so concurrent processes do not
        # clobber each other writing the initial empty JSON object.
        with lockfile.LockFile(data_file):
            with open(data_file, "w") as outfile:
                json.dump({}, outfile)
        self.file_path = data_file
        print('Success : File path is ', data_file)
def test_i_am_locking(self):
    # Verify i_am_locking()/break_lock() semantics: a second LockFile
    # instance sees but does not own the lock, and after breaking the
    # stale lock it can take ownership itself.
    lock1 = lockfile.LockFile(self._testfile(), threaded=False)
    lock1.acquire()
    try:
        assert lock1.is_locked()
        lock2 = lockfile.LockFile(self._testfile())
        try:
            assert lock1.i_am_locking()
            assert not lock2.i_am_locking()
            try:
                # Must time out: lock1 still holds the file lock.
                lock2.acquire(timeout=2)
            except lockfile.LockTimeout:
                # Steal the lock; afterwards neither instance reports it
                # locked and lock2 can acquire normally.
                lock2.break_lock()
                assert not lock2.is_locked()
                assert not lock1.is_locked()
                lock2.acquire()
            else:
                raise AssertionError('expected LockTimeout...')
            # Ownership has moved from lock1 to lock2.
            assert not lock1.i_am_locking()
            assert lock2.i_am_locking()
        finally:
            if lock2.i_am_locking():
                lock2.release()
    finally:
        if lock1.i_am_locking():
            lock1.release()
def check_stale_lock(lock_path, time=20):
    """Detect and break a stale lock at ``lock_path``.

    A helper ``<lock_path>_STALE`` lock serialises the check itself.  Its
    acquire timeout is deliberately longer than the real lock's timeout
    (scaled by the worker count, plus random jitter to avoid a thundering
    herd), so a LockTimeout on it means the stale-test lock itself is
    stale and may be broken.

    Args:
        lock_path: path of the lock file to check.
        time: base timeout in seconds (note: shadows the ``time`` module
            within this function — kept for interface compatibility).
    """
    worker = multiprocessing.cpu_count()
    # Look the configured value up once instead of twice.
    configured_workers = configserver.get('number_worker')
    if configured_workers > 0:
        worker = configured_workers
    lock = lockfile.LockFile(lock_path)
    test_lock = lockfile.LockFile(lock_path + '_STALE')
    while not test_lock.i_am_locking():
        try:
            try:
                # Because this is longer then the lock timeout, we are sure that this is stale
                test_lock.acquire(timeout=time + time / 5 * worker + (time / 2) * random.random())
            except lockfile.LockTimeout:
                test_lock.break_lock()
        except Exception as e:
            # Best effort: log and keep retrying until we own the
            # stale-test lock.  (Removed dead `pass` after the log call.)
            logging.debug('Error at locking stale test log: {}'.format(e))
    try:
        # If the real lock can be acquired and released it is healthy.
        lock.acquire(timeout=time)
        lock.release()
    except lockfile.LockTimeout:
        logging.debug('Breaking log {}'.format(lock_path))
        try:
            lock.break_lock()
        except lockfile.NotLocked:
            pass
    try:
        test_lock.release()
    except lockfile.NotLocked:
        # Someone broke it - not bad
        pass
def save(self):
    """Persist the DH and HD mappings to JSON files, guarded by file locks."""
    with lockfile.LockFile("DH.json"):
        # FIX: context manager guarantees the handle is closed even if the
        # write raises (the old open/close pair leaked on error).
        with open("DH.json", 'w') as f:
            f.write(json.dumps(self.DH))
    with lockfile.LockFile("HD.json"):
        with open("HD.json", "w") as f:
            f.write(json.dumps(self.HD))
def _test_i_am_locking_helper(self, tbool):
    """Check i_am_locking() ownership reporting in (non-)threaded mode."""
    owner = lockfile.LockFile(self._testfile(), threaded=tbool)
    assert not owner.is_locked()
    owner.acquire()
    try:
        assert owner.i_am_locking()
        observer = lockfile.LockFile(self._testfile(), threaded=tbool)
        assert observer.is_locked()
        if tbool:
            # In threaded mode a second instance sees the lock but does
            # not own it.
            assert not observer.i_am_locking()
    finally:
        owner.release()
def _test_break_lock_helper(self, tbool):
    """Breaking a lock from another instance must invalidate the holder."""
    holder = lockfile.LockFile(self._testfile(), threaded=tbool)
    holder.acquire()
    assert holder.is_locked()
    breaker = lockfile.LockFile(self._testfile(), threaded=tbool)
    assert breaker.is_locked()
    breaker.break_lock()
    assert not breaker.is_locked()
    # After the break, releasing the original holder must raise NotLocked.
    try:
        holder.release()
    except lockfile.NotLocked:
        pass
    else:
        raise AssertionError('break lock failed')
def trylock(path, excl, key_path):
    """Try to register ``key_path`` as a user of the lock at ``path``.

    The lock object is a JSON file mapping user key-file paths to their
    key contents.  ``excl`` requests exclusive use.  Returns True on
    success, False when the lock is held incompatibly.
    """
    with lockfile.LockFile(path):
        # Prune invalid users
        if os.path.exists(_lock_path(path)):
            with open(_lock_path(path)) as f:
                lock_obj = json.load(f)
        else:
            lock_obj = {'excl': False, 'users': {}}
        # Drop users whose key file vanished or whose key no longer matches.
        for other_key_path in lock_obj['users'].copy():
            if not os.path.isfile(other_key_path):
                del lock_obj['users'][other_key_path]
                continue
            with open(other_key_path) as f:
                key = f.read()
            if key != lock_obj['users'][other_key_path]:
                del lock_obj['users'][other_key_path]
        # Exclusive requests need zero remaining users; shared requests
        # fail only if the lock is exclusive and still has users.
        if ((excl and len(lock_obj['users']) != 0) or
                (not excl and lock_obj['excl'] and len(lock_obj['users']) != 0)):
            success = False
        else:
            lock_obj['excl'] = excl
            with open(key_path) as f:
                lock_obj['users'][key_path] = f.read()
            success = True
        # Update lock object file
        with open(_lock_path(path), 'w') as f:
            utils.json_dump(lock_obj, f)
        return success
def __init__(self, name):
    ##
    # \var config
    # \brief Holds the configuration.
    #
    # As default the conf variable holds an empty dict which can be used, however it is possible to replace
    # it with anything that can be serialised as an JSON (e.g. list).
    self.config = dict()
    # FIX: `name is ''` tested object identity and only worked by accident
    # for interned strings (SyntaxWarning on modern Python); use equality.
    if name == '':
        logging.warning('Empty name')
        name = 'UNKNOWN'
    self._dir = configserver.get('model_path')
    self._path = self._dir + '/' + name + '.model'
    if os.path.isdir(self._dir) and os.path.exists(self._path):
        # Clear any stale lock before taking the model lock ourselves.
        utility.check_stale_lock(self._path + '_LOCK')
        lock = lockfile.LockFile(self._path + '_LOCK')
        with lock:
            with open(self._path, 'r') as file:
                try:
                    self.config = json.load(file)
                except (ValueError, OSError):
                    # FIX: a bare `except:` also swallowed KeyboardInterrupt
                    # and SystemExit; catch JSON/IO failures specifically.
                    logging.error('Can not load model {}'.format(self._path))
def acquire_lock_or_die():
    """Acquire the state-directory lock without waiting.

    Returns:
        The held LockFile instance; the caller must release it.

    Raises:
        CannotGetStateLockException: if another process holds the lock.
    """
    try:
        lf = lockfile.LockFile(_STATE_DIR)
        lf.acquire(timeout=0)
        return lf
    except lockfile.AlreadyLocked as err:
        # FIX: chain the original error so the underlying lock failure
        # stays visible in tracebacks instead of being discarded.
        raise CannotGetStateLockException() from err
def write_data_file(self, content):
    """Serialise ``content`` as JSON into the data file, lock-guarded."""
    # File locked
    lock = lockfile.LockFile(self.file_path)
    with lock, open(self.file_path, "w") as outfile:
        json.dump(content, outfile)
    return True
def setUp(self):
    """Create a uniquely named MySQL schema and user for this test.

    Schema creation is serialised by a host-wide lock file so parallel
    test runs do not race on the DDL statements.
    """
    super(MySQLSchemaFixture, self).setUp()
    random_bits = ''.join(
        random.choice(string.ascii_lowercase + string.ascii_uppercase)
        for x in range(8))
    self.name = '%s_%s' % (random_bits, os.getpid())
    self.passwd = uuid.uuid4().hex
    lock = lockfile.LockFile('/tmp/nodepool-db-schema-lockfile')
    with lock:
        db = pymysql.connect(host="localhost",
                             user="******",
                             passwd="openstack_citest",
                             db="openstack_citest")
        try:
            cur = db.cursor()
            # Identifiers are generated above (never user input), so the
            # string interpolation cannot inject.
            cur.execute("create database %s" % self.name)
            cur.execute(
                "grant all on %s.* to '%s'@'localhost' identified by '%s'" %
                (self.name, self.name, self.passwd))
            cur.execute("flush privileges")
            cur.close()
        finally:
            # FIX: the connection was previously leaked on every setUp.
            db.close()
    self.dburi = 'mysql+pymysql://%s:%s@localhost/%s' % (
        self.name, self.passwd, self.name)
    self.addDetail('dburi', testtools.content.text_content(self.dburi))
    self.addCleanup(self.cleanup)
def update(d):
    """ update contents of config data

    Recursively merges dict ``d`` into the on-disk configuration; the
    config file is locked for the whole read-merge-write cycle.
    """
    # get (or create) config path
    p = initialize()['config']
    with lockfile.LockFile(p):
        # load current configuration
        # FIX: use context managers so the file handles are closed
        # deterministically (the old code passed bare open() results to
        # load_config/dump_config and leaked them).
        with open(p) as fh:
            cnf = load_config(fh)

        # merge
        def dict_merge(a, b):
            '''recursively merges dict's. not just simple a['key'] = b['key'],
            if both a and b have a key who's value is a dict then dict_merge
            is called on both values and the result stored in the returned
            dictionary.
            from https://www.xormedia.com/recursively-merge-dictionaries-in-python/
            '''
            if not isinstance(b, dict):
                return b
            result = copy.deepcopy(a)
            for k, v in b.items():
                if k in result and isinstance(result[k], dict):
                    result[k] = dict_merge(result[k], v)
                else:
                    result[k] = copy.deepcopy(v)
            return result

        cnf = dict_merge(cnf, d)
        # save
        with open(p, 'w') as fh:
            dump_config(cnf, fh)
def __init__(self, **kwargs):
    """Set up the PCA9632 LED controller device and its command handlers."""
    super(PCA9632Led, self).__init__(**kwargs)

    # Sensor specific commands.
    # TODO: Is this correct? Is there a read URL for leds?
    # It does not appear in the Synse 1.3 docs as a supported device type here:
    # https://docs.google.com/document/d/1HDbBjgkhJGTwEFD2fHycDDyUKt5ijWvgpOcLmQis4dk/edit#heading=h.atvexos4wq8q
    # Motion to deprecate the read led url in 2.0. (In a future commit.)
    self._command_map[cid.READ] = self._read
    self._command_map[cid.CHAMBER_LED] = self._chamber_led
    self._command_map[cid.LED] = self._read

    self._lock = lockfile.LockFile(self.serial_lock)

    self.channel = int(kwargs['channel'], 16)
    self.board_id = int(kwargs['board_offset']) + int(kwargs['board_id_range'][0])

    # Build the board record as one literal instead of piecemeal assignment.
    self.board_record = {
        'board_id': format(self.board_id, '08x'),
        'devices': [{
            'device_id': kwargs['device_id'],
            'device_type': 'vapor_led',
            'device_info': kwargs.get('device_info', 'LED Controller'),
        }],
    }

    # Cache last settings to avoid null responses on led power off.
    self.last_color = '000000'  # This should always be stored as a string. color may be an int.
    self.last_blink = 'steady'  # Do not store no_override here, just steady and blink.
def last_line_generator(file_name):
    """Creates a generator that yields and removes the last line from file_name"""
    # NOTE(review): single bytes read from the "r+b" handle are compared to
    # str "\n"/"\r", which only matches on Python 2 where bytes == str —
    # confirm before porting to Python 3.
    while True:
        with lockfile.LockFile(file_name):
            with open(file_name, "r+b") as f:
                line = ""
                f.seek(0, os.SEEK_END)
                p = f.tell()
                # Scan backwards one byte at a time until a line break (or
                # the start of the file) is reached.
                while p > 0:
                    p -= 1
                    f.seek(p, os.SEEK_SET)
                    c = f.read(1)
                    if len(c) == 0:
                        # file is empty
                        break
                    elif c == "\n" or c == "\r":
                        if len(line) == 0:
                            continue  # skip blank lines
                        else:
                            break  # found a complete line
                    else:
                        line = c + line
                # Remove the yielded line (and trailing blank lines) from
                # the file.
                f.truncate(p)
                if len(line) > 0:
                    # NOTE(review): the lock appears to be held while the
                    # generator is suspended at this yield — confirm that
                    # is intended.
                    yield line
                else:
                    break
def greentea_get_app_sem():
    """Return (lock, lock_path, uuid) for a fresh per-instance semaphore file."""
    greentea_home_dir_init()
    gt_instance_uuid = str(uuid.uuid4())  # String version
    sem_path = os.path.join(HOME_DIR, GREENTEA_HOME_DIR, gt_instance_uuid)
    return lockfile.LockFile(sem_path), sem_path, gt_instance_uuid
def unlock(path, key_path):
    """Remove ``key_path`` from the lock object's user table for ``path``."""
    with lockfile.LockFile(path):
        lock_file = _lock_path(path)
        with open(lock_file) as fh:
            lock_obj = json.load(fh)
        # Raises KeyError if the caller never held the lock, as before.
        lock_obj['users'].pop(key_path)
        with open(lock_file, 'w') as fh:
            utils.json_dump(lock_obj, fh)
def store_and_or_load_data(outputdir, dataset, data_dir): save_path = os.path.join(outputdir, dataset + "_Manager.pkl") if not os.path.exists(save_path): lock = lockfile.LockFile(save_path) while not lock.i_am_locking(): try: lock.acquire(timeout=60) # wait up to 60 seconds except lockfile.LockTimeout: lock.break_lock() lock.acquire() print "I locked", lock.path #It is not yet sure, whether the file already exists try: if not os.path.exists(save_path): D = DataManager(dataset, data_dir, verbose=True) fh = open(save_path, 'w') pickle.dump(D, fh, -1) fh.close() else: D = pickle.load(open(save_path, 'r')) except: raise finally: lock.release() else: D = pickle.load(open(save_path, 'r')) print "Loaded data" return D
def _test_acquire_no_timeout_helper(self, tbool):
    # No timeout test
    # A helper thread acquires the lock (signalled via e1), then both
    # non-blocking acquire forms must raise AlreadyLocked, then e2 tells
    # the thread to release.
    e1, e2 = threading.Event(), threading.Event()
    t = _in_thread(self._lock_wait_unlock, e1, e2)
    e1.wait()  # wait for thread t to acquire lock
    lock2 = lockfile.LockFile(self._testfile(), threaded=tbool)
    assert lock2.is_locked()
    if tbool:
        assert not lock2.i_am_locking()
    else:
        # Non-threaded locks are per-process, so this instance "owns" the
        # lock taken by the helper thread.
        assert lock2.i_am_locking()
    # timeout=-1: fail immediately instead of blocking.
    try:
        lock2.acquire(timeout=-1)
    except lockfile.AlreadyLocked:
        pass
    else:
        lock2.release()
        raise AssertionError("did not raise AlreadyLocked in"
                             " thread %s" % threading.current_thread().get_name())
    # timeout=0 must behave the same: no waiting, raise AlreadyLocked.
    try:
        lock2.acquire(timeout=0)
    except lockfile.AlreadyLocked:
        pass
    else:
        lock2.release()
        raise AssertionError("did not raise AlreadyLocked in"
                             " thread %s" % threading.current_thread().get_name())
    e2.set()  # tell thread t to release lock
    t.join()
def ka_daemon(args):
    """Run/stop/query the ka daemon according to ``args.command``.

    Raises:
        Exception: for an unrecognised command.
    """
    print('Daemon getting here')
    # DaemonContext defaults, kept for documentation; only pidfile is
    # actually passed to DaemonContext below.
    chroot_directory = None
    working_directory = "/"
    umask = 0
    uid = None
    gid = None
    initgroups = False
    prevent_core = True
    detach_process = None
    files_preserve = None
    pidfile = lockfile.LockFile('/tmp/ka.pid')
    stdin = None
    stdout = None
    stderr = None
    signal_map = None
    ctx = daemon.DaemonContext(pidfile=pidfile)
    # ctx.open()
    if args.command == "status":
        # FIX: "sys, exit(...)" was a tuple expression calling the bare
        # builtin exit(); use sys.exit so the status code is returned.
        sys.exit(1 if ctx.is_open else 0)
    elif args.command == "stop":
        ctx.close()
    elif args.command == "start":
        with ctx:
            s.enter(10, 1, do_shit)
            s.run()
    else:
        raise Exception("Unexpected daemon command {!r}".format(args.command))
def __init__(self, **kwargs):
    """Configure an RS-485 device from keyword configuration values."""
    super(RS485Device, self).__init__(lock_path=kwargs['lockfile'])
    self._set_hardware_type(kwargs.get('hardware_type', 'unknown'))

    cfg = kwargs.get

    self.device_name = kwargs['device_name']

    # The emulators are defaulting to 19200, None.
    # For real hardware it's a good idea to configure this.
    self.baud_rate = cfg('baud_rate', 19200)
    self.parity = cfg('parity', 'N')

    self.rack_id = kwargs['rack_id']
    self.unit = kwargs['device_unit']
    self.timeout = cfg('timeout', 0.15)
    self.method = cfg('method', 'rtu')

    self._lock = lockfile.LockFile(self.serial_lock)

    # the device is read from a background process
    self.from_background = cfg('from_background', False)

    # Common RS-485 commands.
    self._command_map = {
        cid.SCAN: self._scan,
        cid.SCAN_ALL: self._scan_all,
        cid.VERSION: self._version,
    }
def test_check_stale_lock(self):
    # Stale lock has to be created in an other process - otherwise no error on failure will be raised
    def create_stale_lock():
        # Acquire the lock and abandon it without releasing, so it looks
        # stale once the child process exits.
        stale_lock = lockfile.LockFile('./tests/lock_test/TEST_LOCK')
        stale_lock.acquire(timeout=1)
        del stale_lock

    # Start from a clean scratch directory.
    if os.path.exists('./tests/lock_test/'):
        shutil.rmtree('./tests/lock_test/')
    os.mkdir('./tests/lock_test/')
    target_lock = lockfile.LockFile('./tests/lock_test/TEST_LOCK')

    # No prior lock
    utility.check_stale_lock('./tests/lock_test/TEST_LOCK', 2)
    target_lock.acquire(timeout=1)
    target_lock.release()

    # With stale lock
    process = multiprocessing.Process(target=create_stale_lock)
    process.start()
    process.join()
    # check_stale_lock must break the abandoned lock so acquire succeeds.
    utility.check_stale_lock('./tests/lock_test/TEST_LOCK', 2)
    target_lock.acquire(timeout=1)
    target_lock.release()

    shutil.rmtree('./tests/lock_test/')
def __init__(self, **kwargs):
    """Initialise a MAX116xx thermistor device from configuration kwargs."""
    super(Max116xxThermistor, self).__init__(**kwargs)
    logger.debug('Max116xxThermistor kwargs: {}'.format(kwargs))

    _instance_name = None  # Must be overridden in a subclass.

    # Sensor specific commands.
    self._command_map[cid.READ] = self._read

    self._lock = lockfile.LockFile(self.serial_lock)

    self.channel = int(kwargs['channel'], 16)
    self.board_id = int(kwargs['board_offset']) + int(kwargs['board_id_range'][0])

    # Build the board record as one literal instead of piecemeal assignment.
    self.board_record = {
        'board_id': format(self.board_id, '08x'),
        'devices': [{
            'device_id': kwargs['device_id'],
            'device_type': 'temperature',
            'device_info': kwargs.get('device_info', 'CEC temperature'),
        }],
    }
    logger.debug('Max116xxThermistor self: {}'.format(self))
def mark_used(self, temp_ver, key_path):
    """
    Adds or updates the user entry in the user access log for the given
    template version

    Args:
        temp_ver (TemplateVersion): template version to add the entry for
        key_path (str): Path to the prefix uuid file to set the mark for
    """
    dest = self.get_path(temp_ver)
    users_file = '%s.users' % dest
    with lockfile.LockFile(dest):
        with open(users_file) as f:
            users = json.load(f)
        # Keep only entries whose key file still exists and still matches.
        surviving = {}
        for path, key in users['users'].items():
            try:
                with open(path) as f:
                    if key == f.read():
                        surviving[path] = key
            except (OSError, IOError):
                pass
        # Register (or refresh) the caller's own key.
        with open(key_path) as f:
            surviving[key_path] = f.read()
        users['users'] = surviving
        users['last_access'] = int(time.time())
        with open(users_file, 'w') as f:
            utils.json_dump(users, f)
def _test_acquire_helper(self, tbool):
    """Smoke test: acquire then release a lock, checking state both times."""
    # As simple as it gets.
    probe = lockfile.LockFile(self._testfile(), threaded=tbool)
    probe.acquire()
    assert probe.i_am_locking()
    probe.release()
    assert not probe.is_locked()
def remove_last_line(file_name):
    """Removes the last line from the given file and returns it. Returns None
    if the file is empty."""
    # NOTE(review): single bytes read from the "r+b" handle are compared to
    # str "\n"/"\r", which only matches on Python 2 where bytes == str —
    # confirm before porting to Python 3.
    with lockfile.LockFile(file_name):
        with open(file_name, "r+b") as f:
            line = ""
            f.seek(0, os.SEEK_END)
            p = f.tell()
            # Walk backwards byte by byte, collecting the last line.
            while p > 0:
                p -= 1
                f.seek(p, os.SEEK_SET)
                c = f.read(1)
                if len(c) == 0:
                    # file is empty
                    break
                elif c == "\n" or c == "\r":
                    if len(line) == 0:
                        continue  # skip blank lines
                    else:
                        break  # found a complete line
                else:
                    line = c + line
            # Drop the returned line (and trailing blank lines) from the file.
            f.truncate(p)
            if len(line) > 0:
                return line
            else:
                return None
def get_bugs_parent(lp):
    """Pick (and lock) a numbered bugs directory under lp.bugs_top_dir.

    Candidate directories 0, 1, 2, ... are tried until one can be locked;
    with args.noLock, candidate 0 is always used without locking.  The
    held lock is released via atexit.  (Python 2 code; reads the global
    ``args``.)
    """
    bugs_parent = ""
    candidate = 0
    bugs_lock = None
    print "Getting locked bugs directory..."
    sys.stdout.flush()

    while bugs_parent == "":
        candidate_path = join(lp.bugs_top_dir, str(candidate))
        if args.noLock:
            # just use 0 always
            bugs_parent = join(candidate_path)
        else:
            lock = lockfile.LockFile(candidate_path)
            try:
                # timeout=-1: fail immediately if another run holds it.
                lock.acquire(timeout=-1)
                bugs_parent = join(candidate_path)
                bugs_lock = lock
            except lockfile.AlreadyLocked:
                candidate += 1

    if not args.noLock:
        # Release on normal exit; turn INT/TERM into sys.exit so the
        # atexit handler still runs.
        atexit.register(bugs_lock.release)
        for sig in [signal.SIGINT, signal.SIGTERM]:
            signal.signal(sig, lambda s, f: sys.exit(0))

    print "Using dir", bugs_parent
    lp.set_bugs_parent(bugs_parent)
    return bugs_parent
def save_targets_ensemble(self, targets):
    """Persist the ensemble targets array atomically under a file lock.

    Args:
        targets (np.ndarray): target values to store (saved as float32).

    Returns:
        str: path of the targets file.

    Raises:
        ValueError: if ``targets`` is not an ndarray.
    """
    self._make_internals_directory()
    if not isinstance(targets, np.ndarray):
        raise ValueError('Targets must be of type np.ndarray, but is %s' %
                         type(targets))
    filepath = self._get_targets_ensemble_filename()
    # Lock on a sibling ".lock" path so concurrent readers/writers serialise.
    lock_path = filepath + '.lock'
    with lockfile.LockFile(lock_path):
        if os.path.exists(filepath):
            existing_targets = np.load(filepath)
            # Keep the existing file if it already holds at least as many
            # rows, or an identical array — never overwrite a superset.
            if existing_targets.shape[0] > targets.shape[0] or \
                    (existing_targets.shape == targets.shape and
                     np.allclose(existing_targets, targets)):
                return filepath
        # Write to a temp file in the same directory, then rename into
        # place so readers never observe a partially written file.
        with tempfile.NamedTemporaryFile('wb', dir=os.path.dirname(filepath),
                                         delete=False) as fh:
            np.save(fh, targets.astype(np.float32))
            tempname = fh.name
        os.rename(tempname, filepath)
    return filepath
def _pull_repo(name, full_name, description, clone_url, num_stars, num_forks,
               created_at, pushed_at, outputDirectory, dbFile, repos):
    """Clone a repository (if new and without CJK description) and record it.

    Appends the repo to ``repos`` and re-pickles the list into ``dbFile``
    under a file lock.  Clone failures are logged and swallowed.
    """
    repo = PyRepo(name, full_name, description, clone_url, time.time(),
                  num_stars, num_forks, created_at, pushed_at)
    if repo in repos:
        print("Skipping %s because it has already been cloned" % repo)
        return
    try:
        # TODO: block all non-english descriptions
        if len(re.findall(r'[\u4e00-\u9fff]+', description)) > 0:
            print("Skipping %s because it contains chinese characters" % repo)
            return
        repo.clone(outputDirectory)
        with lockfile.LockFile(dbFile):
            repos.append(repo)
            print("Cloned %s" % repo.details())
            # FIX: context manager closes the pickle file even when
            # pickle.dump raises (old open/close pair leaked on error).
            with open(dbFile, "wb") as outfile:
                pickle.dump(repos, outfile)
    except Exception as e:
        print("Failed to clone %s due to %s" % (repo, e))
def load_targets_ensemble(self):
    """Load the ensemble targets array, serialised against writers by a
    sibling '.lock' file."""
    target_file = self._get_targets_ensemble_filename()
    with lockfile.LockFile(target_file + '.lock'):
        return np.load(target_file)
def load_targets_ensemble(self):
    """Read the pickled ensemble targets while holding the file's lock."""
    target_path = self._get_targets_ensemble_filename()
    with lockfile.LockFile(target_path), open(target_path, 'rb') as fh:
        return np.load(fh, allow_pickle=True)