def reset(session, host_uuid, sr_uuid, is_sr_master):
    """Clear stale per-host VDI sm-config state for an SR.

    Removes this host's "host_<ref>" attached marker from every VDI in
    the SR and, on the SR master only, clears any leftover "paused"
    flags.  Both the GC running-lock and the SR lock are held for the
    duration so the cleanup cannot race a garbage collector or another
    SR operation.
    """
    gc_lock = lock.Lock(LOCK_TYPE_RUNNING, sr_uuid)
    sr_lock = lock.Lock(LOCK_TYPE_SR, sr_uuid)
    gc_lock.acquire()
    sr_lock.acquire()
    # try/finally guarantees both locks are dropped even if an XAPI call
    # raises (the original leaked both locks in that case).
    try:
        sr_ref = session.xenapi.SR.get_by_uuid(sr_uuid)
        host_ref = session.xenapi.host.get_by_uuid(host_uuid)
        # NOTE(review): key is built from the host *ref*, not the uuid --
        # presumably matching the key written on attach; confirm.
        host_key = "host_%s" % host_ref
        util.SMlog("RESET for SR %s (master: %s)" % (sr_uuid, is_sr_master))
        vdi_recs = session.xenapi.VDI.get_all_records_where( \
                "field \"SR\" = \"%s\"" % sr_ref)
        for vdi_ref, vdi_rec in vdi_recs.iteritems():
            vdi_uuid = vdi_rec["uuid"]
            sm_config = vdi_rec["sm_config"]
            if sm_config.get(host_key):
                util.SMlog("Clearing attached status for VDI %s" % vdi_uuid)
                session.xenapi.VDI.remove_from_sm_config(vdi_ref, host_key)
            if is_sr_master and sm_config.get("paused"):
                util.SMlog("Clearing paused status for VDI %s" % vdi_uuid)
                session.xenapi.VDI.remove_from_sm_config(vdi_ref, "paused")
    finally:
        sr_lock.release()
        gc_lock.release()
def run(self):
    """Display the main menu and dispatch to the chosen sub-menu.

    After the chosen action returns, the menu is shown again (via a
    recursive self.run() call), so it loops until the process exits.
    """
    logging.debugv("menu/__init__.py->run(self)", [])
    # When the sensor is configured and menu locking is on, force the
    # lock screen before offering the menu.
    if not self.c.getSensorID() == "Unknown":
        if self.c.getLock() == "Enabled":
            lock.Lock(self.d).run()
    title = "\\ZbStart\\n\\ZB" + "What do you want to do today?"
    entries = [
        ("Configure", "Configure this sensor"),
        ("Manage", "Start/stop sensor functions"),
        ("Status", "View the status of this sensor"),
        ("Log", "View the logfile of this sensor"),
        #("Update", "Update the sensor scripts"),
        #("Console", "Open a management console"),
        ("About", "Learn more about the SURFids sensor"),
        ("Shutdown", "Shutdown the machine"),
        ("Lock", "Lock the sensor menu"),
    ]
    choice = self.d.menu(title, choices=entries,
                         nocancel=1, width=60, colors=1)
    # cancel
    if choice[0] == 1:
        return
    # Label -> handler table replaces the original if/elif chain; an
    # unknown label falls through to the re-display below, exactly as
    # an unmatched elif did.
    handlers = {
        "Configure": lambda: config.Config(self.d).run(),
        "Manage": lambda: manage.Manage(self.d).run(),
        "Status": lambda: status.Status(self.d).run(),
        "Log": lambda: log.Log(self.d).run(),
        "Console": lambda: console.Console(self.d).run(),
        "About": lambda: about.About(self.d).run(),
        "Shutdown": lambda: manage.Manage(self.d).shutdown(),
        "Lock": lambda: lock.Lock(self.d).run(),
    }
    action = handlers.get(choice[1])
    if action is not None:
        action()
    self.run()
def test_shared_lock(self):
    """Two distinct users may hold a SHARED lock; re-locking by an
    existing owner fails with ErrAlreadyOwner."""
    lck = lock.Lock()
    name = "ramu"
    email = "*****@*****.**"
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.SHARED), name, email)
    self.assertEqual(ok, True)
    self.assertEqual(err, None)
    # Same user again: refused, but still an owner.
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.SHARED), name, email)
    self.assertEqual(ok, False)
    self.assertEqual(err, lock.ErrAlreadyOwner)
    self.assertEqual(lck.is_lock_owner(email), True)
    # A second user can join a shared lock.
    name = "gullu"
    email = "*****@*****.**"
    self.assertEqual(lck.is_lock_owner(email), False)
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.SHARED), name, email)
    self.assertEqual(ok, True)
    self.assertEqual(err, None)
    self.assertEqual(lck.is_lock_owner(email), True)
def get_card_info(card_id):
    """Service endpoint: look up a card stored in "/tmp/cards_db.txt"
    by its GathererId.

    Returns a flask.Response with the matching records as indented JSON
    (HTTP 200), or a plain not-found message (HTTP 400).
    """
    columns_name = mu.get_columns_name("magiccard")
    query_card = pd.DataFrame()
    # Create and acquire the lock *before* the try block: in the
    # original, a failure inside lk.Lock()/acquire() made the finally
    # clause raise NameError or release a lock that was never taken.
    db_lock = lk.Lock("/tmp/lock_name.tmp")
    db_lock.acquire()
    try:
        dtFrame = pd.read_csv(mu.LOCAL_DB_FILE, delimiter=",,",
                              header=None, names=columns_name,
                              engine='python')
        query_card = dtFrame.query('GathererId == @card_id')
    finally:
        db_lock.release()
    if query_card.empty:
        ans = "GathererId " + card_id + " not found."
        status = 400
    else:
        ans = json.dumps(json.loads(query_card.to_json(orient='records')),
                         indent=2)
        status = 200
    return flask.Response(response=ans, status=status)
def test_unlock(self):
    """lock() then unlock() cycles the virtual lock Unlocked -> Locked
    -> Unlocked."""
    backing = lock_physical.LockPhysical()
    virt = lock.Lock(backing)
    self.assertEqual(virt.state == lock.Unlocked, True)
    virt.lock()
    self.assertEqual(virt.state == lock.Locked, True)
    virt.unlock()
    self.assertEqual(virt.state == lock.Unlocked, True)
def get_lvm_lock():
    """Open and acquire the system-wide lock that wraps LVM calls.

    :return: the acquired lock; the caller is responsible for releasing it
    """
    lvm_lock = lock.Lock('lvm')
    lvm_lock.acquire()
    return lvm_lock
def do_trim(session, args): """Attempt to trim the given LVHDSR""" util.SMlog("do_trim: %s" % args) sr_uuid = args["sr_uuid"] os.environ['LVM_SYSTEM_DIR'] = MASTER_LVM_CONF if TRIM_CAP not in util.sr_get_capability(sr_uuid): util.SMlog("Trim command ignored on unsupported SR %s" % sr_uuid) err_msg = { ERROR_CODE_KEY: 'UnsupportedSRForTrim', ERROR_MSG_KEY: 'Trim on [%s] not supported' % sr_uuid } return to_xml(err_msg) # Lock SR, get vg empty space details sr_lock = lock.Lock(vhdutil.LOCK_TYPE_SR, sr_uuid) got_lock = False for i in range(LOCK_RETRY_ATTEMPTS): got_lock = sr_lock.acquireNoblock() if got_lock: break time.sleep(LOCK_RETRY_INTERVAL) if got_lock: try: vg_name = _vg_by_sr_uuid(sr_uuid) lv_name = sr_uuid + TRIM_LV_TAG lv_path = _lvpath_by_vg_lv_name(vg_name, lv_name) # Clean trim LV in case the previous trim attemp failed if lvutil.exists(lv_path): lvutil.remove(lv_path) #Check if VG limits are enough for creating LV. stats = lvutil._getVGstats(vg_name) if (stats['freespace'] < lvutil.LVM_SIZE_INCREMENT): util.SMlog("No space to claim on a full SR %s" % sr_uuid) err_msg = { ERROR_CODE_KEY: 'Trim failed on full SR', ERROR_MSG_KEY: 'No space to claim on a full SR' } result = to_xml(err_msg) else: # Perform a lvcreate, blkdiscard and lvremove to # trigger trim on the array lvutil.create(lv_name, 0, vg_name, size_in_percentage="100%F") cmd = ["/usr/sbin/blkdiscard", "-v", lv_path] stdout = util.pread2(cmd) util.SMlog("Stdout is %s" % stdout) util.SMlog("Trim on SR: %s complete. " % sr_uuid) result = str(True) except util.CommandException, e: err_msg = { ERROR_CODE_KEY: 'TrimException', ERROR_MSG_KEY: e.reason } result = to_xml(err_msg) except:
def test_waiters_persistence(self):
    """A queued waiter survives a save_lock()/load_lock() round trip."""
    lck = lock.Lock()
    email = "*****@*****.**"
    lck.add_to_waiting_queue(email, True)
    lck.save_lock()
    lck.load_lock()
    queue_after = lck.waiters()
    self.assertEqual(len(queue_after), 1)
    self.assertEqual(queue_after[0]["Email"], email)
    self.assertEqual(queue_after[0]["Notify"], True)
def doexec_locked(cmd):
    '''Executes via util.doexec the command specified whilst holding lock.

    The lock is only taken for iscsiadm invocations (it serialises
    concurrent iscsiadm calls); every other command runs unlocked.
    Returns the (rc, stdout, stderr) triple from util.doexec.
    '''
    _lock = None
    if os.path.basename(cmd[0]) == 'iscsiadm':
        _lock = lock.Lock(LOCK_TYPE_RUNNING, 'iscsiadm')
        _lock.acquire()
    #util.SMlog("%s" % (cmd))
    # try/finally: the original leaked the lock if util.doexec raised.
    try:
        (rc, stdout, stderr) = util.doexec(cmd)
    finally:
        # "is not None" replaces the non-idiomatic "!= None" test.
        if _lock is not None and _lock.held():
            _lock.release()
    return (rc, stdout, stderr)
def test_owners_persistence(self):
    """An owner added before save_lock() is still present after load_lock()."""
    lck = lock.Lock()
    name = "ramu"
    email = "*****@*****.**"
    lck.add_lock_owner(name, email)
    lck.save_lock()
    lck.load_lock()
    owners_after = lck.owners()
    self.assertEqual(len(owners_after), 1)
    self.assertEqual(owners_after[0]["Name"], name)
    self.assertEqual(owners_after[0]["Email"], email)
def test_history_persistence(self):
    """A history entry survives a save_lock()/load_lock() round trip."""
    lck = lock.Lock()
    email = "*****@*****.**"
    action = "Locked"
    lck.add_history(email, action)
    lck.save_lock()
    lck.load_lock()
    history_after = lck.history()
    self.assertEqual(len(history_after), 1)
    self.assertEqual(history_after[0]["Email"], email)
    self.assertEqual(history_after[0]["Action"], action)
def do_trim(session, args):
    """Attempt to trim (discard unused blocks) on the given LVHDSR.

    Creates a temporary LV spanning all free space and removes it with
    issue_discards=1, causing the array to reclaim the space.  Returns
    str(True) on success or an XML-encoded error document on failure.
    """
    util.SMlog("do_trim: %s" % args)
    sr_uuid = args["sr_uuid"]

    if TRIM_CAP not in util.sr_get_capability(sr_uuid):
        util.SMlog("Trim command ignored on unsupported SR %s" % sr_uuid)
        err_msg = {ERROR_CODE_KEY: 'UnsupportedSRForTrim',
                   ERROR_MSG_KEY: 'Trim on [%s] not supported' % sr_uuid}
        return to_xml(err_msg)

    # Lock SR, get vg empty space details
    sr_lock = lock.Lock(vhdutil.LOCK_TYPE_SR, sr_uuid)
    got_lock = False
    for i in range(LOCK_RETRY_ATTEMPTS):
        got_lock = sr_lock.acquireNoblock()
        if got_lock:
            break
        time.sleep(LOCK_RETRY_INTERVAL)

    if not got_lock:
        # Guard clause: fail fast when the SR lock cannot be taken.
        util.SMlog("Could not complete Trim on %s, Lock unavailable !" \
            % sr_uuid)
        err_msg = {ERROR_CODE_KEY: 'SRUnavailable',
                   ERROR_MSG_KEY: 'Unable to get SR lock [%s]' % sr_uuid}
        return to_xml(err_msg)

    try:
        try:
            vg_name = _vg_by_sr_uuid(sr_uuid)
            lv_name = sr_uuid + TRIM_LV_TAG
            lv_path = _lvpath_by_vg_lv_name(vg_name, lv_name)

            # Clean trim LV in case the previous trim attempt failed
            if lvutil.exists(lv_path):
                lvutil.remove(lv_path)

            # Perform a lvcreate and lvremove to trigger trim on the array
            lvutil.create(lv_name, 0, vg_name, size_in_percentage="100%F")
            lvutil.remove(lv_path, config_param="issue_discards=1")
            util.SMlog("Trim on SR: %s complete. " % sr_uuid)
            result = str(True)
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            err_msg = {
                ERROR_CODE_KEY: 'UnknownTrimException',
                ERROR_MSG_KEY:
                    'Unknown Exception: trim failed on SR [%s]' % sr_uuid
            }
            result = to_xml(err_msg)
        _log_last_triggered(session, sr_uuid)
    finally:
        # Always release: the original leaked the SR lock if
        # _log_last_triggered raised.
        sr_lock.release()
    return result
def test_lock_acquire_noblock_release(self, context):
    """A non-blocking acquire takes the lock; release drops it."""
    self.setup_fcntl_return(context)
    the_lock = lock.Lock("somename")
    the_lock.acquireNoblock()
    self.assertTrue(the_lock.held())
    the_lock.release()
    self.assertFalse(the_lock.held())
def test_max_history(self):
    """History is capped at lock.max_history; the oldest entries are
    discarded first."""
    lck = lock.Lock()
    base_email = "*****@*****.**"
    action = "Locked"
    overflow = 20
    for idx in range(lock.max_history + overflow):
        lck.add_history(f"{base_email}{idx!s}", action)
    entries = lck.history()
    self.assertEqual(len(entries), lock.max_history)
    # The first `overflow` entries were evicted, so entry 0 is the one
    # added at index `overflow`.
    self.assertEqual(entries[0]["Email"], f"{base_email}{overflow!s}")
def test_lock_acquire_twice_release(self, context):
    """Two Lock objects with the same name share one underlying lock,
    which stays held until the matching number of releases."""
    self.setup_fcntl_return(context)
    first = lock.Lock("somename")
    first.acquire()
    self.assertTrue(first.held())
    # Same name -> same underlying lock as `first`.
    second = lock.Lock("somename")
    second.acquire()
    self.assertTrue(second.held())
    second.release()
    # One release of the doubly-acquired shared lock keeps it held.
    self.assertTrue(second.held())
    self.assertTrue(first.held())
    first.release()
    self.assertFalse(first.held())
def callback(ch, method, properties, body):
    """AMQP consumer callback: append the received card record to the
    local flat-file DB.

    The message body is JSON; key order is preserved so the ",,"-joined
    values line up with the DB's column order.
    """
    card = json.loads(body.decode('utf-8'), object_pairs_hook=OrderedDict)
    print("Receiving card...")
    # Create and acquire the lock *before* the try block: in the
    # original, a failure inside lk.Lock()/acquire() made the finally
    # clause raise NameError or release a lock that was never taken.
    db_lock = lk.Lock("/tmp/lock_name.tmp")
    db_lock.acquire()
    try:
        with open(mu.LOCAL_DB_FILE, 'a') as file:
            values = list(card.values())
            # ",," is the field delimiter; string[2:] strips the
            # leading delimiter produced by the join.
            string = "".join(",," + str(value) for value in values)
            #print(string[2:])
            file.write(string[2:])
            file.write('\n')
    finally:
        db_lock.release()
def exn_on_failure(cmd, message):
    '''Executes via util.doexec the command specified. If the return code
    is non-zero, raises an ISCSIError with the given message.

    NOTE(review): `message` is currently unused -- the raised error
    carries rc/stdout/stderr instead; kept for interface compatibility.
    '''
    _lock = None
    if os.path.basename(cmd[0]) == 'iscsiadm':
        _lock = lock.Lock(LOCK_TYPE_RUNNING, 'iscsiadm')
        _lock.acquire()
    # try/finally: the original leaked the lock if util.doexec raised.
    # "is not None" also replaces the Python-2-only "<>" operator.
    try:
        (rc, stdout, stderr) = util.doexec(cmd)
    finally:
        if _lock is not None and _lock.held():
            _lock.release()
    if rc == 0:
        return (stdout, stderr)
    msg = 'rc: %d, stdout: %s, stderr: %s' % (rc, stdout, stderr)
    raise xs_errors.XenError('SMGeneral', opterr=msg)
def test_exclusive_unlock(self):
    """unlock() by a non-owner is harmless, and unlock() by the owner
    releases an EXCLUSIVE lock."""
    lck = lock.Lock()
    name = "ramu"
    email = "*****@*****.**"
    # Unlocking without owning must leave the caller a non-owner.
    lck.unlock(email)
    self.assertEqual(lck.is_lock_owner(email), False)
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.EXCLUSIVE), name, email)
    self.assertEqual(ok, True)
    self.assertEqual(err, None)
    self.assertEqual(lck.is_lock_owner(email), True)
    lck.unlock(email)
    self.assertEqual(lck.is_lock_owner(email), False)
def __init__(self, syncer, local, folder=None):
    """Bind a working copy to a local path and a remote Pithos folder.

    When `folder` is given, the working copy is treated as not yet
    initialised and the caller must call .init() or .clone(); otherwise
    the remote folder is recovered from the saved metadata file.
    """
    self.syncer = syncer
    self.local = local
    self.meta_file = meta.LocalMetaFile(self.local)
    self.lock = lock.Lock(self)
    self.client = PithosClient(syncer.url, syncer.token,
                               syncer.account, syncer.container)
    if folder is not None:
        # working copy not init'ed -- caller must .init() or .clone()
        self.folder = folder
    else:
        # working copy already init'ed -- folder comes from metadata
        self.meta_file.load()
        self.folder = self.meta_file.remote_dir
def __init__(self, storage_path='storage'):
    """Set up the storage wrapper rooted at `storage_path`/Data.

    Filesystem primitives are bound as instance attributes -- presumably
    so tests can substitute them with fakes; confirm against the test
    suite.
    """
    self.data_path = os.path.join(storage_path, 'Data')
    # Directory traversal / file manipulation primitives.
    self.walk = os.walk
    self.copy = shutil.copy
    self.rename = os.rename
    self.remove = os.remove
    self.utime = os.utime
    # Path inspection helpers.
    self.dir_name = os.path.dirname
    self.last_accessed = os.path.getatime
    self.last_modified = os.path.getmtime
    self.path_sep = os.path.sep
    self.base_name = os.path.basename
    self.abs_path = os.path.abspath
    # Working-directory and temp-dir helpers.
    self.make_temp_dir = tempfile.mkdtemp
    self.change_dir = os.chdir
    self.current_dir = os.getcwd
    self.lock = lock.Lock()
    # Cache of modification times, keyed per path -- TODO confirm usage.
    self._mtimes = {}
def edit_wwid(wwid, remove=False):
    """Add a wwid to the list of exceptions or remove if remove is set to 1.

    Edits CONF_FILE in place via a temp copy: when adding, the wwid line
    is inserted right after the BELIST_TAG section opener; when removing,
    every line from the section opener up to (and including) the matching
    wwid line is dropped from the output until the wwid is found.
    """
    tmp_file = CONF_FILE+"~"
    filt_regex = re.compile('^\s*%s\s*{'%BELIST_TAG)
    wwid_regex = re.compile('^\s*wwid\s+\"%s\"'%wwid)

    # Serialise concurrent edits of the multipath config on this host.
    conflock = lock.Lock(LOCK_TYPE_HOST, LOCK_NS)
    conflock.acquire()

    try:
        shutil.copy2(CONF_FILE, tmp_file)
    except:
        util.SMlog("Failed to create temp file %s" %(tmp_file))
        raise

    # add_mode == True: pass lines through (and maybe insert the new
    # wwid); add_mode == False: we are scanning for the wwid line to
    # drop, suppressing output until it is found.
    add_mode = True
    # fileinput with inplace=1 redirects print output into tmp_file.
    for line in fileinput.input(tmp_file, inplace=1):
        if add_mode:
            print line,
        else:
            if wwid_regex.match(line):
                # Found the wwid to remove: do not print it, resume
                # normal pass-through from the next line.
                add_mode = True
            else:
                print line,
            continue
        if filt_regex.match(line):
            if remove:
                # looking for the line to remove
                add_mode = False
                continue
            else:
                # Insert the new wwid just after the section opener.
                print "\twwid \"%s\""%wwid
    # Atomically replace the live config with the edited copy.
    shutil.move(tmp_file, CONF_FILE)
def __init__(self, name=None, ip=None):
    """Initialise the chassis record and push it to the spreadsheet.

    Name/IP are resolved in increasing priority: built-in defaults,
    then the [Chassis] section of the config file, then the explicit
    constructor arguments.
    """
    self._lock = lock.Lock()
    self._config = configparser.ConfigParser()
    self._config.read(_get_config_file_path())
    # Start from the defaults...
    self._chassis_name = _get_default_name()
    self._chassis_ip = _get_default_ip()
    # ...override from the config file if present...
    if 'Chassis' in self._config:
        section = self._config['Chassis']
        if 'ip' in section:
            self._chassis_ip = section['ip']
        if 'name' in section:
            self._chassis_name = section['name']
    # ...and finally from non-empty explicit arguments.
    if name is not None and name != '':
        self._chassis_name = name
    if ip is not None and ip != '':
        self._chassis_ip = ip
    self._gsheet = gsheet.GSheet(self._chassis_name, self._chassis_ip)
    self._update_gsheet()
def test_exclusive_lock(self):
    """An EXCLUSIVE lock admits exactly one owner and rejects every
    later attempt."""
    lck = lock.Lock()
    name = "ramu"
    email = "*****@*****.**"
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.EXCLUSIVE), name, email)
    self.assertEqual(ok, True)
    self.assertEqual(err, None)
    self.assertEqual(lck.is_lock_owner(email), True)
    # The owner re-locking is refused with ErrAlreadyOwner...
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.EXCLUSIVE), name, email)
    self.assertEqual(ok, False)
    self.assertEqual(err, lock.ErrAlreadyOwner)
    # ...and any other user is refused with ErrNotAvailable.
    email = "*****@*****.**"
    ok, err = lck.lock(
        lock._lock_type_to_name(lock.LockType.EXCLUSIVE), name, email)
    self.assertEqual(ok, False)
    self.assertEqual(err, lock.ErrNotAvailable)
def __init__(self, cmd=None):
    """Prepare an LVM operation wrapper; the lock is created but not taken."""
    # System-wide lock serialising LVM operations.
    self.lock = lock.Lock(LVM_LOCK)
    # Command to execute -- presumably consumed by a run/execute method
    # elsewhere in this class; confirm.
    self.cmd = cmd
    # Whether self.lock is currently held by this instance.
    self.locked = False
def test_lock_with_namespace_creates_file(self, context):
    """Creating a namespaced lock materialises its backing file under
    BASE_DIR/<ns>/<name>."""
    namespaced = lock.Lock('somename', ns='namespace')
    expected = os.path.join(namespaced.BASE_DIR, 'namespace', 'somename')
    self.assertTrue(os.path.exists(expected))
def test_type_persistence(self):
    """The lock type survives a save_lock()/load_lock() round trip."""
    persisted = lock.Lock()
    persisted.change_lock_type(lock.LockType.EXCLUSIVE)
    persisted.save_lock()
    persisted.load_lock()
    self.assertEqual(persisted.type(), lock.LockType.EXCLUSIVE)
# Script entry: parse arguments, then take the host-wide mpathcount lock
# (with a single-slot "pending" queue) before doing any work.
mpp_path_update = False
match_bySCSIid = False

util.daemon()
# Two extra argv entries select single-SCSIid MPP path-update mode.
if len(sys.argv) == 3:
    match_bySCSIid = True
    SCSIid = sys.argv[1]
    mpp_path_update = True
    mpp_entry = sys.argv[2]

# We use flocks to ensure that only one process
# executes at any one time, however we must make
# sure that any subsequent changes are always
# correctly updated, so we allow an outstanding
# process to queue behind the running one.
mpathcountlock = lock.Lock(LOCK_TYPE_HOST, LOCK_NS1)
mpathcountqueue = lock.Lock(LOCK_TYPE_HOST, LOCK_NS2)
util.SMlog("MPATH: Trying to acquire the lock")
if mpp_path_update:
    # Path updates always wait for the main lock.
    mpathcountlock.acquire()
elif not mpathcountlock.acquireNoblock():
    if not mpathcountqueue.acquireNoblock():
        # There is already a pending update
        # so safe to exit
        sys.exit(0)
    # We acquired the pending queue lock
    # so now wait on the main lock
    mpathcountlock.acquire()
    mpathcountqueue.release()
util.SMlog("MPATH: I get the lock")
def test_fresh_lock(self):
    """A newly created lock is FREE with no owners, waiters or history."""
    fresh = lock.Lock()
    self.assertEqual(fresh.type(), lock.LockType.FREE)
    for collection in (fresh.owners(), fresh.waiters(), fresh.history()):
        self.assertEqual(collection, [])
import os
import json

from awscrt import mqtt

import face_detector
import iot_comm_aws
import lock
import lock_physical

# Globals
# NOTE(review): queue, threading and logging (and mqtt_send_state, used
# below) are not imported in this span -- presumably defined earlier in
# the file; confirm.
aws_pipeline = queue.Queue()
iot_pipeline = queue.Queue()
stop_cap_event = threading.Event()
# Single virtual lock wrapping the physical lock hardware.
phys_lock = lock_physical.LockPhysical()
vir_lock = lock.Lock(phys_lock)


# Mqtt Dispatcher
def mqtt_dispatch(topic: str, payload: dict):
    """Route an incoming MQTT message by topic.

    Only "cmd/lock/state" is handled: the payload's "state" field
    ("lock"/"unlock") drives the virtual lock, and the resulting state
    is reported back via mqtt_send_state().
    """
    topic_levels = topic.split("/")
    if topic_levels[0] == "cmd":
        # Handle incoming command
        if topic_levels[1] == "lock":
            if topic_levels[2] == "state":
                if payload["state"] == "lock":
                    logging.info(vir_lock.lock())
                    mqtt_send_state("lock")
                elif payload["state"] == "unlock":
                    logging.info(vir_lock.unlock())
                    mqtt_send_state("unlock")
def rescan(hostid):
    """Perform a host rescan, coordinating with concurrent callers via a
    common lock and a per-host "starttime" marker file.

    Only one rescan runs at a time; a caller that arrives while one is
    in flight waits for it, then re-runs the rescan only if its own
    start time is not already covered by the completed one.

    NOTE(review): the finally clause releases `l` unconditionally, but on
    the normal path the lock was already released before break -- a
    potential double release; and if lock.Lock()/acquire() raises, `l`
    is unbound and the finally raises NameError. Both predate this
    review -- confirm the lock implementation tolerates them.
    """
    try:
        try:
            # get the current time, call it x
            curr_time = datetime.utcnow()
            # acquire common lock
            l = lock.Lock(RESCAN_LOCK_NAME, HOST_LOCK_NAME_FORMAT % hostid)
            l.acquire()
            while(1):
                # check if starttime_anything exists
                tryRescan = False
                files = glob.glob(START_TIME_FILE_PATH_FORMAT % (hostid, '*'))
                if len(files) == 0:
                    # if not, create starttime_x
                    path = START_TIME_FILE_PATH_FORMAT % (hostid,
                                                          str(curr_time))
                    path = path.replace(' ', '_')
                    open(path, 'w').close()
                    # release common lock
                    l.release()
                    # perform host rescan
                    _rescan_hostID(hostid)
                    # acquire common lock
                    l.acquire()
                    # remove starttime_x
                    os.unlink(path)
                    # release common lock and exit
                    l.release()
                    break
                else:
                    # if it does
                    # read the start time
                    start_time = files[0].split(
                        START_TIME_FILE_PATH_FORMAT % (hostid, ''))[1]
                    start_time = DateTime(start_time.replace('__', ' '))
                    while(1):
                        # stick around till start_time exists
                        # drop common lock
                        l.release()
                        # sleep for a sec
                        time.sleep(1)
                        # acquire common lock
                        l.acquire()
                        # check if start time exists
                        if len(glob.glob(START_TIME_FILE_PATH_FORMAT % \
                                (hostid, '*'))) == 0:
                            tryRescan = False
                            if DateTime(str(curr_time)) < start_time:
                                # we are cool, this started before the rescan
                                # drop common lock and go home
                                l.release()
                            else:
                                # try to start a rescan
                                tryRescan = True
                            break
                        # else continue by default
                    if not tryRescan:
                        break
        except Exception, e:
            util.SMlog("Failed to perform rescan of host: %s. "\
                    "Error: %s" % (hostid, str(e)))
    finally:
        l.release()