def detachThin(session, lvmCache, srUuid, vdiUuid):
    """Shrink the VDI to the minimal size if no one is using it"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    path = os.path.join(VG_LOCATION, VG_PREFIX + srUuid, lvName)
    lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
    _tryAcquire(lock)
    vdiRef = session.xenapi.VDI.get_by_uuid(vdiUuid)
    vbds = session.xenapi.VBD.get_all_records_where(
            "field \"VDI\" = \"%s\"" % vdiRef)
    numPlugged = 0
    for vbdRec in vbds.values():
        if vbdRec["currently_attached"]:
            numPlugged += 1
    if numPlugged > 1:
        raise util.SMException("%s still in use by %d others" %
                (vdiUuid, numPlugged - 1))
    lvmCache.activate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    try:
        newSize = calcSizeLV(vhdutil.getSizePhys(path))
        deflate(lvmCache, lvName, newSize)
    finally:
        lvmCache.deactivate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    lock.release()
def daemonize(args, callback):
    with DaemonContext():
        create_process = False
        lock = Lock(LOCKFILE, os.getpid(), args.name, args.sea_ep[0],
                    args.sea_ep[1], args.port)
        if lock.is_locked():
            lock_pid = lock.get_pid()
            if not lock.is_same_file(args.name, args.sea_ep[0],
                                     args.sea_ep[1]) \
                    or not is_process_running(lock_pid):
                try:
                    os.kill(lock_pid, signal.SIGQUIT)
                except OSError:
                    pass
                except TypeError:
                    pass
                lock.break_lock()
                create_process = True
        else:
            create_process = True

        if create_process:
            lock.acquire()
            callback(args.name, season=args.sea_ep[0],
                     episode=args.sea_ep[1], serve=True, port=args.port)
            lock.release()
class PhysicalView(Base):
    def __init__(self, dfs):
        Base.__init__(self, dfs)
        self.lock_ = Lock(dfs)
        self.createBaseFolder()

    def read(self, fileName, buf, offset, bufsize):
        # TODO add thread safety
        filePath = os.path.join(self.getBasePath(), fileName)
        size = self.getFileSize(fileName)
        if offset + bufsize > size:
            self.log_.w('tried to read ' + fileName + ' but size is '
                        + str(size) + ' and bufsize + offset = '
                        + str(offset + bufsize))
            return err.InvalidBufferSize
        self.lock_.acquire()
        try:
            f = open(filePath, "r")
        except Exception, ex:
            self.log_.e('error opening file in read mode ' + filePath
                        + ': ' + str(ex))
            self.lock_.release()
            return err.FileNotFound
        status = err.OK
        f.seek(offset)
        try:
            data = f.read(bufsize)
            for i, d in enumerate(data):
                buf[i] = d
        except Exception, ex:
            self.log_.e('failed to read ' + filePath + ' from '
                        + str(offset) + ' to ' + str(offset + bufsize)
                        + ': ' + str(ex))
            status = err.CannotReadFile
        # release resources and report the outcome
        f.close()
        self.lock_.release()
        return status
class Test02_MfcLock(unittest.TestCase):
    '''Unittest for mfc1.Lock().'''

    def setUp(self):
        '''Prepare test.'''
        self.__lock = Lock('./', 'andy')

    def test_get_pid(self):
        '''Testing mfc1.Lock.get_pid().'''
        r = self.__lock.get_pid('mindfulclock1')
        self.failUnlessEqual(first=r, second=0)

    def test_one_instance(self):
        '''Testing mfc1.Lock.one_instance().'''
        r = self.__lock.one_instance('mindfulclock1')
        self.failUnlessEqual(first=r, second=True)

    def test_write_check_delete_lock(self):
        '''Testing write_lock(), check_lock(), delete_lock().'''
        r = self.__lock.write_lock()
        self.failUnlessEqual(first=r, second=True)
        # check lock file.
        r = self.__lock.is_lock()
        self.failUnlessEqual(first=r, second=True)
        # delete lock file.
        r = self.__lock.delete_lock()
        self.failUnlessEqual(first=r, second=True)
def main():
    lock = Lock()
    lock.set_server_url("http://20.20.20.157:5000/validate-pin")
    # lock.set_wifi_conf("GOT", "MASTER2D")
    lock.connect_to_wifi()
    lock.set_rc522_uid_addr(0x08)
    lock.run()
def _kickGC(self):
    # don't bother if an instance already running (this is just an
    # optimization to reduce the overhead of forking a new process if we
    # don't have to, but the process will check the lock anyways)
    lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
    if not lockRunning.acquireNoblock():
        if cleanup.should_preempt(self.session, self.uuid):
            util.SMlog("Aborting currently-running coalesce of garbage VDI")
            try:
                if not cleanup.abort(self.uuid, soft=True):
                    util.SMlog("The GC has already been scheduled to "
                               "re-start")
            except util.CommandException as e:
                if e.code != errno.ETIMEDOUT:
                    raise
                util.SMlog('failed to abort the GC')
            finally:
                return
        else:
            util.SMlog("A GC instance already running, not kicking")
            return
    else:
        lockRunning.release()

    util.SMlog("Kicking GC")
    cleanup.gc(self.session, self.uuid, True)
class Test02_MfcLock(unittest.TestCase):
    """Unittest for mfc1.Lock()."""

    def setUp(self):
        """Prepare test."""
        self.lock = Lock("./", "andy")

    def test_get_pid(self):
        """Testing mfc1.Lock.get_pid()."""
        r = self.lock.get_pid("mindfulclock1")
        self.failUnlessEqual(first=r, second=0)

    def test_one_instance(self):
        """Testing mfc1.Lock.one_instance()."""
        r = self.lock.one_instance("mindfulclock1")
        self.failUnlessEqual(first=r, second=True)

    def test_write_check_delete_lock(self):
        """Testing write_lock(), check_lock(), delete_lock()."""
        r = self.lock.write_lock()
        self.failUnlessEqual(first=r, second=True)
        # check lock file.
        r = self.lock.is_lock()
        self.failUnlessEqual(first=r, second=True)
        # delete lock file.
        r = self.lock.delete_lock()
        self.failUnlessEqual(first=r, second=True)
class Test02_MfcLock(unittest.TestCase):
    '''Unittest for mfc1.Lock().'''

    def setUp(self):
        '''Prepare test.'''
        self.__lock = Lock('./', 'andy')

    def test_delete_lock(self):
        '''Testing mfc1.Lock.delete_lock().'''
        r = self.__lock.delete_lock()
        self.failUnlessEqual(first=r, second=False)

    def test_is_lock(self):
        '''Testing mfc1.Lock.is_lock().'''
        r = self.__lock.is_lock()
        self.failUnlessEqual(first=r, second=False)

    def test_write_lock(self):
        '''Testing mfc1.Lock.write_lock().'''
        r = self.__lock.write_lock()
        self.failUnlessEqual(first=r, second=True)
        # check lock file.
        r = self.__lock.is_lock()
        self.failUnlessEqual(first=r, second=True)
        # delete lock file.
        r = self.__lock.delete_lock()
        self.failUnlessEqual(first=r, second=True)
def daemonize(args, callback):
    with DaemonContext():
        from touchandgo.logger import log_set_up
        log_set_up(True)
        log = logging.getLogger('touchandgo.daemon')
        log.info("running daemon")
        create_process = False
        lock = Lock(LOCKFILE, os.getpid(), args.name, args.sea_ep[0],
                    args.sea_ep[1], args.port)
        if lock.is_locked():
            log.debug("lock active")
            lock_pid = lock.get_pid()
            if not lock.is_same_file(args.name, args.sea_ep[0],
                                     args.sea_ep[1]) \
                    or not is_process_running(lock_pid):
                try:
                    log.debug("killing process %s" % lock_pid)
                    os.kill(lock_pid, signal.SIGQUIT)
                except OSError:
                    pass
                except TypeError:
                    pass
                lock.break_lock()
                create_process = True
        else:
            create_process = True

        if create_process:
            log.debug("creating process")
            lock.acquire()
            callback()
            lock.release()
        else:
            log.debug("same daemon process")
def testStateUnlocked(self):
    testState2: bool = False
    lock2 = Lock(state=testState2)
    lock2.set_needKey(testState2)
    self.assertEqual(lock2._state, False)
    self.assertEqual(lock2._needKey, False)
def lock_file(self):
    '''Handle the mfc1.Lock object.

    Public objects:
    lock = mfc1.Lock
    lockstate = Status of the lock file.
        '' = No status.
        'written' = Lock file is written.
        'deleted' = Lock file is deleted.
        'exit' = Program exit.

    '''
    userid = wx.GetUserId()
    wxpath = wx.StandardPaths.Get()
    userdir = wxpath.GetDocumentsDir()
    self.lock = Lock(path=userdir, userid=userid)
    self.lockstate = ''
    if self.lock.one_instance('mindfulclock1'):
        # One instance.
        self.lock.write_lock()
        self.lockstate = 'written'
    else:
        # More than one instance.
        if self.start_question():
            # Start the clock.
            self.lock.write_lock()
            self.lockstate = 'written'
        else:
            # Exit the program.
            self.lockstate = 'exit'
class Election:
    def __init__(self, name, is_master_callback, lost_master_callback):
        self.lock = Lock(name, lock_callback=self._lock,
                         lock_lost_callback=self._lost_lock)
        self.master_callback = is_master_callback
        self.lost_master_callback = lost_master_callback
        self.running = False
        self.condition = threading.Condition()

    def shutdown(self):
        self.running = False
        self.condition.acquire()
        self.condition.notify()
        self.condition.release()

    def run(self):
        self.running = True
        while self.running:
            self.lock.acquire()
            self.condition.acquire()
            self.condition.wait()
            self.condition.release()
            self.lock.release()

    def _lock(self):
        self.master_callback()

    def _lost_lock(self):
        self.lost_master_callback()
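# A hedged usage sketch for the Election class above. The callback bodies,
# thread wiring, and lock name are illustrative assumptions, not part of
# the original snippet.
import threading

def became_master():
    print("this node is now the master")

def lost_master():
    print("this node lost mastership")

election = Election("service-leader", became_master, lost_master)
worker = threading.Thread(target=election.run)
worker.start()
# ... the callbacks fire as the underlying Lock is won or lost ...
election.shutdown()  # wakes run() so it releases the lock and exits
worker.join()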
def test_release_free_resource():
    """
    Test case scenario: send a release request for a resource that is
    already free, which is not allowed, so an error message is expected
    in response.
    """
    # targeted resource and client address
    resource_name = 'resourceX'
    client_address = '127.0.0.1'
    client_lock = Lock(resource_name, client_address)
    status = client_lock.check_status()
    # initiate socket s and send the release request for resourceX
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    s.sendall('release resourceX')
    data = s.recv(1024)
    # expecting an error message as a response
    assert data == 'resource is already free.'
    # the resource was free and must still be free
    assert client_lock.check_status() == 'free'
    # close the client socket
    s.close()
def checkLocked(obj, ns):
    """Lock-protected access"""
    lock = Lock(obj, ns)
    lock.acquire()
    try:
        return RefCounter.check(obj, ns)
    finally:
        lock.release()
def setReadonly(self, lvName, readonly):
    path = self._getPath(lvName)
    if self.lvs[lvName].readonly != readonly:
        uuids = util.findall_uuid(path)
        ns = lvhdutil.NS_PREFIX_LVM + uuids[0]
        lock = Lock(uuids[1], ns)
        lock.acquire()
        lvutil.setReadonly(path, readonly)
        lock.release()
        self.lvs[lvName].readonly = readonly
def setReadonly(self, lvName, readonly):
    path = self._getPath(lvName)
    if self.lvs[lvName].readonly != readonly:
        uuids = util.findall_uuid(path)
        ns = lvhdutil.NS_PREFIX_LVM + uuids[0]
        # Taking this lock is needed to avoid a race condition
        # with tap-ctl open (which is now taking the same lock)
        lock = Lock("lvchange-p", ns)
        lock.acquire()
        lvutil.setReadonly(path, readonly)
        lock.release()
        self.lvs[lvName].readonly = readonly
def activate(self, ns, ref, lvName, binary):
    lock = Lock(ref, ns)
    lock.acquire()
    try:
        count = RefCounter.get(ref, binary, ns)
        if count == 1:
            try:
                self.activateNoRefcount(lvName)
            except util.CommandException:
                RefCounter.put(ref, binary, ns)
                raise
    finally:
        lock.release()
def _cbt_op(self, uuid, func, *args):
    # Lock cbtlog operations
    from lock import Lock
    lock = Lock("cbtlog", str(uuid))
    lock.acquire()
    try:
        logname = self._get_cbt_logname(uuid)
        activated = self._activate_cbt_log(logname)
        ret = func(*args)
        if activated:
            self._deactivate_cbt_log(logname)
        return ret
    finally:
        lock.release()
def deactivate(self, sr_uuid, vdi_uuid):
    """Deactivate VDI - called post tapdisk close"""
    if self._get_blocktracking_status():
        from lock import Lock
        lock = Lock("cbtlog", str(vdi_uuid))
        lock.acquire()
        try:
            logpath = self._get_cbt_logpath(vdi_uuid)
            logname = self._get_cbt_logname(vdi_uuid)
            self._cbt_op(vdi_uuid, cbtutil.set_cbt_consistency,
                         logpath, True)
            # Finally deactivate log file
            self._deactivate_cbt_log(logname)
        finally:
            lock.release()
def load(self, sr_uuid):
    self.ops_exclusive = FileSR.OPS_EXCLUSIVE
    self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
    self.sr_vditype = SR.DEFAULT_TAP
    self.driver_config = DRIVER_CONFIG
    if not self.dconf.has_key('server'):
        raise xs_errors.XenError('ConfigServerMissing')
    self.remoteserver = self.dconf['server']
    self.nosubdir = False
    if self.sr_ref and self.session is not None:
        self.sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
    else:
        self.sm_config = self.srcmd.params.get('sr_sm_config') or {}
    self.nosubdir = self.sm_config.get('nosubdir') == "true"
    if self.dconf.has_key('serverpath'):
        self.remotepath = os.path.join(self.dconf['serverpath'],
                not self.nosubdir and sr_uuid or "").encode('utf-8')
    self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)

    # Handle optional dconf attributes
    self.transport = DEFAULT_TRANSPORT
    if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
        self.transport = "udp"
    self.nfsversion = nfs.validate_nfsversion(self.dconf.get('nfsversion'))
    if 'options' in self.dconf:
        self.options = self.dconf['options']
    else:
        self.options = ''
def get_agencies(cls, truncate=True):
    """ Get a list of agencies """
    with Lock("agencies"):
        request_params = {
            'command': 'agencyList',
        }
        agencies_xml, api_call = cls.request(request_params, 'agency')
        if not agencies_xml:
            return []

        db.session.begin()
        if truncate:
            db.session.query(Agency).delete()

        agencies = []
        for agency in agencies_xml:
            region = Region.get_or_create(db.session,
                                          title=agency.get('regionTitle'))
            a = Agency.get_or_create(db.session,
                                     tag=agency.get('tag'),
                                     title=agency.get('title'),
                                     short_title=agency.get('shortTitle'),
                                     region=region,
                                     api_call=api_call)
            agencies.append(a)
        db.session.commit()
        return agencies
def __init__(self, config):
    """A valid individual able to run tasks.

    Must be validated as a prerequisite.

    Args:
        config: a ConfigParser object.

    """
    self.__config = config
    self.__subjectDir = self.__config.get('arguments', 'subjectDir')
    self.__name = os.path.basename(self.__subjectDir)
    self.__logDir = os.path.join(self.__subjectDir,
                                 self.__config.get('dir', 'log'))
    if not os.path.exists(self.__logDir):
        os.mkdir(self.__logDir)
    Lock.__init__(self, self.__logDir, self.__name)
def deactivateVdi(sr_uuid, vdi_uuid, vhd_path):
    name_space = lvhdutil.NS_PREFIX_LVM + sr_uuid
    lock = Lock(vdi_uuid, name_space)
    lock.acquire()
    try:
        count = RefCounter.put(vdi_uuid, False, name_space)
        if count > 0:
            return
        try:
            lvutil.deactivateNoRefcount(vhd_path)
        except Exception, e:
            util.SMlog("lv de-activate failed for %s with error %s" %
                       (vhd_path, str(e)))
            RefCounter.get(vdi_uuid, False, name_space)
    finally:
        lock.release()
def _kickGC(self):
    # don't bother if an instance already running (this is just an
    # optimization to reduce the overhead of forking a new process if we
    # don't have to, but the process will check the lock anyways)
    lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
    if not lockRunning.acquireNoblock():
        if cleanup.should_preempt(self.session, self.uuid):
            util.SMlog("Aborting currently-running coalesce of garbage VDI")
            try:
                if not cleanup.abort(self.uuid, soft=True):
                    util.SMlog("The GC has already been scheduled to "
                               "re-start")
            except util.CommandException, e:
                if e.code != errno.ETIMEDOUT:
                    raise
                util.SMlog("failed to abort the GC")
            finally:
                return
        else:
            util.SMlog("A GC instance already running, not kicking")
            return
    else:
        lockRunning.release()

    util.SMlog("Kicking GC")
    cleanup.gc(self.session, self.uuid, True)
class LockTest(unittest.TestCase):
    def setUp(self):
        self.lock = Lock('3176', 'BE', 'normal')

    def test_lockstring(self):
        self.assertEqual(str(self.lock), '3176 locked')

    def test_getStatus(self):
        self.assertEqual(self.lock.getStatus(), 'locked')

    def test_setStatus(self):
        self.lock.setStatus('unlocked')
        self.assertEqual(self.lock.getStatus(), 'unlocked')

    def test_GetDoor(self):
        self.assertEqual(self.lock.getDoor(), '3176')
def OnAddedToSpace(self, ballpark, dbspacecomponent):
    persister = Persister(ballpark.solarsystemID, self.itemID,
                          dbspacecomponent)
    bountyEscrowBonus, bounties = persister.GetStateForSystem()
    iskRegistry = IskRegistry(bounties)
    iskMover = IskMover(ballpark.broker.account)
    itemCreator = GetItemCreator(ballpark.inventory2, ballpark,
                                 self.attributes.tagTypeIDs.keys())
    escrow = Escrow(self, ballpark, iskRegistry, iskMover, itemCreator,
                    persister)
    item = ballpark.inventory2.GetItem(self.itemID)
    eventLogger = EventLogger(ballpark.broker.eventLog,
                              ballpark.solarsystemID, item.ownerID,
                              self.itemID)
    notifier = Notifier(ballpark.broker.notificationMgr)
    self.rangeNotifier = RangeNotifier(ballpark.solarsystemID, ballpark,
                                       ballpark.broker.machoNet,
                                       self.GetWallclockTime)
    ballpark.proximityRegistry.RegisterForProximity(
        self.itemID, 30000, self.rangeNotifier.PlayerInRange)
    lock = Lock(self)
    self.warpScrambler = WarpScrambler(self.itemID, lock, ballpark.dogmaLM)
    self.Initialize(ballpark, escrow, lock, persister, eventLogger,
                    notifier)
    self.escrow.SetBonus(bountyEscrowBonus)
def _kickGC(self):
    # don't bother if an instance already running (this is just an
    # optimization to reduce the overhead of forking a new process if we
    # don't have to, but the process will check the lock anyways)
    lockRunning = Lock(cleanup.LOCK_TYPE_RUNNING, self.uuid)
    if not lockRunning.acquireNoblock():
        if cleanup.should_preempt(self.session, self.uuid):
            util.SMlog("Aborting currently-running coalesce of garbage VDI")
            cleanup.abort(self.uuid)
        else:
            util.SMlog("A GC instance already running, not kicking")
            return
    else:
        lockRunning.release()

    util.SMlog("Kicking GC")
    cleanup.gc(self.session, self.uuid, True)
def load(self, sr_uuid):
    self.ops_exclusive = OPS_EXCLUSIVE
    self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
    self.sr_vditype = vhdutil.VDI_TYPE_VHD
    if not self.dconf.has_key('location') or not self.dconf['location']:
        raise xs_errors.XenError('ConfigLocationMissing')
    self.path = self.dconf['location']
    self.attached = False
def attachThin(journaler, srUuid, vdiUuid):
    """Ensure that the VDI LV is expanded to the fully-allocated size"""
    lvName = LV_PREFIX[VDI_TYPE_VHD] + vdiUuid
    vgName = VG_PREFIX + srUuid
    lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
    lvmCache = journaler.lvmCache

    lock.acquire()
    vhdInfo = vhdutil.getVHDInfoLVM(lvName, extractUuid, vgName)
    newSize = calcSizeVHDLV(vhdInfo.sizeVirt)
    currSizeLV = lvmCache.getSize(lvName)
    if newSize <= currSizeLV:
        # release the SR lock on the early-exit path as well
        lock.release()
        return
    lvmCache.activate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    try:
        inflate(journaler, srUuid, vdiUuid, newSize)
    finally:
        lvmCache.deactivate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    lock.release()
def __init__(self, dfs, fileSystem):
    NetworkThread.__init__(self, dfs)
    self.fileSystem_ = fileSystem
    self.listeners_ = []
    self.peerLock_ = Lock(dfs, 'peer')
    self.workQueue_ = []
    self.work_ = None
    self.knownPeers_ = set()
    self.fileFetchStatus = []
def test_lock_resource_then_die():
    """
    Test case scenario: a client locks resourceX and then dies. When the
    connection between the client and the server goes down, the server
    should release the lock on resourceX automatically.
    """
    # targeted resource and client address
    resource_name = 'resourceX'
    resource_id = get_resource_id_by_name(resource_name)[0]
    client_address = '127.0.0.1'
    client_lock = Lock(resource_name, client_address)
    status = client_lock.check_status()
    # initiate socket s and send the access request for resourceX
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    if status == 'free':
        s.sendall('lock resourceX')
        data = s.recv(1024)
        # make sure the access is granted
        assert data == 'You have an exclusive access to resource resourceX'
        assert client_lock.check_status() == 'busy'
        # make sure this operation was stored in the db;
        # each operation row: client_ip_address, operation_time, operation_type
        operations = get_operations_by_resource_id(resource_id)
        assert len(operations) == 1
        assert operations[0][0] == client_address
        assert operations[0][2] == "lock"
        # delete operations
        delete_operation_by_resource_id(resource_id)
    # the client terminates the connection
    s.close()
    # sleep for 5 seconds to make sure the server clean-up is done
    time.sleep(5)
    # make sure the server frees resourceX after the client dies
    assert client_lock.check_status() == 'free'
def activate(self, sr_uuid, vdi_uuid):
    """Activate VDI - called pre tapdisk open"""
    if self._get_blocktracking_status():
        from lock import Lock
        lock = Lock("cbtlog", str(vdi_uuid))
        lock.acquire()
        try:
            logpath = self._get_cbt_logpath(vdi_uuid)
            logname = self._get_cbt_logname(vdi_uuid)
            # Activate CBT log file, if required
            self._activate_cbt_log(logname)
        finally:
            lock.release()
        # Check and update consistency
        consistent = self._cbt_op(vdi_uuid, cbtutil.get_cbt_consistency,
                                  logpath)
        if not consistent:
            lock.acquire()
            try:
                self._delete_cbt_log()
            finally:
                lock.release()
            vdi_ref = self.sr.srcmd.params['vdi_ref']
            self.sr.session.xenapi.VDI.set_cbt_enabled(vdi_ref, False)
            alert_name = "VDI_CBT_METADATA_INCONSISTENT"
            alert_prio_warning = "3"
            alert_obj = "VDI"
            alert_uuid = str(vdi_uuid)
            alert_str = ("Changed Block Tracking metadata is inconsistent"
                         " for disk %s." % vdi_uuid)
            util.SMlog(alert_str)
            self.sr.session.xenapi.message.create(alert_name,
                                                  alert_prio_warning,
                                                  alert_obj, alert_uuid,
                                                  alert_str)
            return None
        self._cbt_op(self.uuid, cbtutil.set_cbt_consistency, logpath, False)
        return {'cbtlog': logpath}
    return None
def attachThin(journaler, srUuid, vdiUuid):
    """Ensure that the VDI LV is expanded to the fully-allocated size"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    vgName = VG_PREFIX + srUuid
    lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
    lvmCache = journaler.lvmCache

    _tryAcquire(lock)
    lvmCache.refresh()
    vhdInfo = vhdutil.getVHDInfoLVM(lvName, extractUuid, vgName)
    newSize = calcSizeVHDLV(vhdInfo.sizeVirt)
    currSizeLV = lvmCache.getSize(lvName)
    if newSize <= currSizeLV:
        # release the SR lock on the early-exit path as well
        lock.release()
        return
    lvmCache.activate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    try:
        inflate(journaler, srUuid, vdiUuid, newSize)
    finally:
        lvmCache.deactivate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
    lock.release()
def load(self, sr_uuid):
    self.ops_exclusive = FileSR.OPS_EXCLUSIVE
    self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
    self.sr_vditype = SR.DEFAULT_TAP
    self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
    self.vgname = EXT_PREFIX + sr_uuid
    self.remotepath = os.path.join("/dev", self.vgname, sr_uuid)
    self.attached = self._checkmount()
    self.driver_config = DRIVER_CONFIG
def __init__(self, config):
    """A valid individual who has the capability to run tasks.

    This class is responsible for writing a document of the software
    names and versions into the log directory.

    Must be validated as a prerequisite.

    Args:
        config: a ConfigParser object.

    """
    self.__config = config
    self.__subjectDir = self.__config.get('arguments', 'subjectDir')
    self.__name = os.path.basename(self.__subjectDir)
    self.__logDir = os.path.join(self.__subjectDir,
                                 self.__config.get('dir', 'log'))
    # the subject logger must be called without file information
    # during initialization
    Logger.__init__(self)
    Lock.__init__(self, self.__logDir, self.__name)
    Validation.__init__(self, self.__subjectDir, self.__config)
def acquire_lock(self, sid, lock_type, tran_id):
    # Find the corresponding rid given the sid.
    if self.index[self.key].contains_key(sid):
        rid = self.index[self.key].locate(sid)[0]
    else:
        print("Acquire_Lock_Error: provided SID is not valid")
        return False
    self.lock.acquire()
    # If the record already has a lock list, check whether it contains an
    # exclusive lock and whether that lock belongs to the same transaction.
    if len(self.page_directory[rid]) == 3:
        # Alias the record's lock list
        lock_list = self.page_directory[rid][2]
        # The lock list might be empty when we release all locks;
        # that doesn't mean no lock was ever appended.
        if lock_list.head is not None:
            if lock_list.has_exlock():
                if not lock_list.same_exlock_tranID(tran_id):
                    print("Adding a lock after an exclusive lock: lock "
                          "appending failed, abort the transaction.")
                    self.lock.release()
                    return False
            else:
                if not lock_list.has_lock(tran_id, lock_type):
                    new_lock = Lock(lock_type, tran_id)
                    lock_list.append_list(new_lock)
        else:
            new_lock = Lock(lock_type, tran_id)
            lock_list.append_list(new_lock)
    # If the record doesn't have a lock list, make one and append the lock.
    else:
        self.page_directory[rid].append(Lock_List())
        new_lock = Lock(lock_type, tran_id)
        self.page_directory[rid][2].append_list(new_lock)
    self.lock.release()
    return True
def deactivate(self, ns, ref, lvName, binary):
    lock = Lock(ref, ns)
    lock.acquire()
    try:
        count = RefCounter.put(ref, binary, ns)
        if count > 0:
            return
        refreshed = False
        while True:
            lvInfo = self.getLVInfo(lvName)
            if len(lvInfo) != 1:
                raise util.SMException("LV info not found for %s" % ref)
            info = lvInfo[lvName]
            if info.open:
                if refreshed:
                    # should never happen in normal conditions but in some
                    # failure cases the recovery code may not be able to
                    # determine what the correct refcount should be, so it
                    # is not unthinkable that the value might be out of
                    # sync
                    util.SMlog("WARNING: deactivate: LV %s open" % lvName)
                    return
                # check again in case the cached value is stale
                self.refresh()
                refreshed = True
            else:
                break
        try:
            self.deactivateNoRefcount(lvName)
        except util.CommandException:
            self.refresh()
            if self.getLVInfo(lvName):
                util.SMlog("LV %s could not be deactivated" % lvName)
                if lvInfo[lvName].active:
                    util.SMlog("Reverting the refcount change")
                    RefCounter.get(ref, binary, ns)
                raise
            else:
                util.SMlog("LV %s not found" % lvName)
    finally:
        lock.release()
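# For context, callers are expected to pair activate() and deactivate() so
# the reference count stays balanced. A minimal sketch of that pattern;
# `cache`, `ns`, `vdi_uuid`, `lv_name`, and do_work_on_lv() are illustrative
# placeholders, mirroring the try/finally pairing in the attachThin/detachThin
# helpers above.
cache.activate(ns, vdi_uuid, lv_name, False)
try:
    do_work_on_lv()  # illustrative stand-in for the real work
finally:
    # always undo the refcount, even if the work fails
    cache.deactivate(ns, vdi_uuid, lv_name, False)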
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
    # with self.__locks_lock:
    lock = Lock(self.client, self.lock_path_prefix + lockid)
    try:
        acquired = lock.acquire(blocking=blocking, timeout=timeout)
        logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
        if not acquired:
            raise LockFailedError(lock_id=lockid)
        yield
    except LockTimeout:
        logger.info('Failed to acquire lock {0} due to timeout '
                    '({1} seconds)'.format(lockid, timeout))
        raise LockFailedError(lock_id=lockid)
    except LockFailedError:
        raise
    except Exception as e:
        logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
            lockid, e, traceback.format_exc()))
        raise
    finally:
        lock.release()
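# Because this method yields, it only works as written when decorated with
# contextlib.contextmanager. A hedged usage sketch; `manager`, the lock id,
# and do_critical_work() are illustrative assumptions, not part of the
# original snippet.
with manager.lock('group-42', blocking=True, timeout=10):
    do_critical_work()  # runs only while the distributed lock is held
# the finally clause above guarantees lock.release() on every exit path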
def __init__(self):
    self.pir = MyPiPIR(MyPiPIR.DEFAULT)
    self.led = MyPiLed(MyPiLed.RED)
    self.buzzer = MyPiBuzzer(MyPiBuzzer.DEFAULT)
    self.locks = []
    self.tries = 0
    self.max_tries = 3
    self.locks.append(Lock('Vault'))
    self.logger = MyLogger("SecuritySystem")
    self.check_interval = self.__class__.CHECK_INTERVAL
    self.enabled = False
def load(self, sr_uuid):
    self.ops_exclusive = OPS_EXCLUSIVE
    self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
    self.sr_vditype = vhdutil.VDI_TYPE_VHD
    if not self.dconf.has_key('location') or not self.dconf['location']:
        raise xs_errors.XenError('ConfigLocationMissing')
    self.remotepath = self.dconf['location']
    self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
    self.linkpath = self.path
    self.mountpoint = self.path
    self.attached = False
    self.driver_config = DRIVER_CONFIG
def load(self, sr_uuid):
    self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
    self.sr_vditype = SR.DEFAULT_TAP
    if not self.dconf.has_key('server'):
        raise xs_errors.XenError('ConfigServerMissing')
    self.remoteserver = self.dconf['server']
    self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)

    # Test for the optional 'useUDP' dconf attribute
    self.transport = DEFAULT_TRANSPORT
    if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
        self.transport = "udp"
def recoverSite(self):
    # reset timestamp
    self.timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    self.isRunning = True
    for ID in self.variables:
        self.lockTable[ID] = Lock()
    # note the isReplicated() check here: only non-replicated variables
    # are immediately ready to read after recovery
    for ID in self.isReady:
        if not self.isReplicated(ID):
            self.isReady[ID] = True
        else:
            self.isReady[ID] = False
def __init__(self, woodType: str = DEFAULT_WOOD,
             height: float = DEFAULT_HEIGHT,
             width: float = DEFAULT_WIDTH,
             lockdiameter: float = Lock.DEFAULT_DIAMETER,
             state: bool = Lock.DEFAULT_STATE,
             handlediameter: float = Handle.DEFAULT_DIAMETER):
    self._woodType: str = woodType
    self._height: float = height
    self._width: float = width
    self._lock: Lock = Lock(lockdiameter)
    self._handle: Handle = Handle(handlediameter)
class TestLock(unittest.TestCase):
    def setUp(self):
        self.client = Redis()
        self.client.flushdb()
        self.lock = Lock(self.client, 'lock')

    def test_only_one_lock_can_be_acquired(self):
        self.assertTrue(self.lock.acquire())
        self.assertFalse(self.lock.acquire())

    def test_release_works(self):
        self.lock.acquire()
        self.assertTrue(self.lock.release())

    def test_release_returns_false_when_lock_not_acquired(self):
        self.assertFalse(self.lock.release())
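# For reference, a minimal sketch of a Lock that would satisfy the tests
# above, built on redis-py's SET NX primitive. This is an assumption about
# the implementation under test, not the tested code itself.
from redis import Redis

class Lock(object):
    """A minimal single-key lock: acquire wins only if the key is absent."""

    def __init__(self, client, key):
        self.client = client
        self.key = key

    def acquire(self):
        # SET key value NX succeeds only when the key does not exist yet
        return bool(self.client.set(self.key, 1, nx=True))

    def release(self):
        # DEL returns the number of keys removed; 0 reads as False,
        # which is what the last test expects for an unheld lock
        return bool(self.client.delete(self.key))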
def main():
    lock = Lock('/tmp/poll_manager.lock')
    if lock.locked:
        logger.error('Lock file {} exists, exiting...'.format(lock.lock_file))
        return 1
    else:
        lock.acquire()
        logger.warn('Lock file {} acquired'.format(lock.lock_file))

    url = properties.PASTA_BASE_URL + '/changes/eml?'
    qm = QueueManager()
    fromDate = None
    dt = qm.get_last_datetime()
    if dt is not None:
        fromDate = datetime.strftime(dt, '%Y-%m-%dT%H:%M:%S.%f')
    if fromDate is None:
        bootstrap(url=url)
    else:
        parse(url=url, fromDate=fromDate)

    lock.release()
    logger.warn('Lock file {} released'.format(lock.lock_file))
    return 0
def lock(self, lockid, blocking=True, timeout=LOCK_TIMEOUT):
    lock = Lock(self.client, self.lock_path_prefix + lockid)
    try:
        acquired = lock.acquire(blocking=blocking, timeout=timeout)
        logger.debug('Lock {0} acquired: {1}'.format(lockid, acquired))
        if not acquired:
            # TODO: Change exception type or set all required parameters
            # for this type of exception
            raise LockAlreadyAcquiredError(lock_id=lockid)
        yield
    except LockTimeout:
        logger.info('Failed to acquire lock {} due to timeout '
                    '({} seconds)'.format(lockid, timeout))
        raise LockFailedError(lock_id=lockid)
    except LockAlreadyAcquiredError:
        raise
    except LockError as e:
        logger.error('Failed to acquire lock {0}: {1}\n{2}'.format(
            lockid, e, traceback.format_exc()))
        raise
    finally:
        lock.release()
def activate(self, sr_uuid, vdi_uuid):
    """Activate VDI - called pre tapdisk open"""
    if self._get_blocktracking_status():
        if self.sr.srcmd.params.has_key('args'):
            read_write = self.sr.srcmd.params['args'][0]
            if read_write == "false":
                # Disk is being attached in RO mode,
                # don't attach metadata log file
                return None
        from lock import Lock
        lock = Lock("cbtlog", str(vdi_uuid))
        lock.acquire()
        try:
            logpath = self._get_cbt_logpath(vdi_uuid)
            logname = self._get_cbt_logname(vdi_uuid)
            # Activate CBT log file, if required
            self._activate_cbt_log(logname)
        finally:
            lock.release()
        # Check and update consistency
        consistent = self._cbt_op(vdi_uuid, cbtutil.get_cbt_consistency,
                                  logpath)
        if not consistent:
            alert_name = "VDI_CBT_METADATA_INCONSISTENT"
            alert_str = ("Changed Block Tracking metadata is inconsistent"
                         " for disk %s." % vdi_uuid)
            self._disable_cbt_on_error(alert_name, alert_str)
            return None
        self._cbt_op(self.uuid, cbtutil.set_cbt_consistency, logpath, False)
        return {'cbtlog': logpath}
    return None
class LogicalView(Base):
    def __init__(self, dfs):
        Base.__init__(self, dfs)
        self.lock_ = Lock(dfs)
        self.fileList_ = {}

    def beginLocalUpdate(self, fileName):
        file = self.getFile(fileName)
        if file.latestVersion != file.localVersion:
            file.localVersion = file.latestVersion.copy()
            file.ownNoChunks()

    def add(self, fileName, fileSize):
        self.lock_.acquire()
        f = File(fileName, 1, fileSize, self.dfs_.id.str)
        self.fileList_[fileName] = f
        self.lock_.release()

    def delete(self, fileName):
        self.lock_.acquire()
        self.fileList_[fileName].isDeleted = True
        self.lock_.release()

    def exists(self, fileName):
        return fileName in self.fileList_

    def getLocalVersion(self, fileName):
        return self.fileList_[fileName].getLocalVersion()

    def getLatestVersion(self, fileName):
        return self.fileList_[fileName].getLatestVersion()

    def setNewVersion(self, fileName, version):
        self.fileList_[fileName].setNewVersion(version.copy())

    def setLocalVersion(self, fileName, numEdits, fileSize, lastEdited):
        self.fileList_[fileName].setLocalVersion(numEdits, fileSize,
                                                 lastEdited)

    def getFileList(self):
        return self.fileList_.values()

    def getState(self):
        return self.fileList_

    def getFile(self, fileName):
        return self.fileList_[fileName]
def process_repositories(self, repo_configs, ref, action, request_body):
    import os
    import time
    import logging
    import json
    from wrappers import GitWrapper
    from lock import Lock

    logger = logging.getLogger()
    data = json.loads(request_body)

    # Process each matching repository
    for repo_config in repo_configs:
        try:
            # Verify that all filters match the request (if any filters
            # are specified)
            if 'filters' in repo_config:
                # at least one filter must match
                for filter in repo_config['filters']:
                    # all options specified in the filter must match
                    for filter_key, filter_value in filter.iteritems():
                        # support for an earlier version, so this is
                        # non-breaking functionality
                        if filter_key == 'action' and filter_value == action:
                            continue
                        if filter_key not in data or \
                                filter_value != data[filter_key]:
                            raise FilterMatchError()
        except FilterMatchError as e:
            # Filter does not match, do not process this repo config
            continue

        # In case there is no path configured for the repository, no pull
        # will be made.
        if not 'path' in repo_config:
            GitWrapper.deploy(repo_config)
            continue

        running_lock = Lock(os.path.join(repo_config['path'],
                                         'status_running'))
        waiting_lock = Lock(os.path.join(repo_config['path'],
                                         'status_waiting'))
        try:
            # Attempt to obtain the status_running lock
            while not running_lock.obtain():
                # If we're unable, try once to obtain the status_waiting lock
                if not waiting_lock.has_lock() and not waiting_lock.obtain():
                    logger.error("Unable to obtain the status_running lock "
                                 "nor the status_waiting lock. Another "
                                 "process is already waiting, so we'll "
                                 "ignore the request.")
                    # If we're unable to obtain the waiting lock, ignore
                    # the request
                    break
                # Keep on attempting to obtain the status_running lock
                # until we succeed
                time.sleep(5)

            n = 4
            while 0 < n and 0 != GitWrapper.pull(repo_config):
                n -= 1
            if 0 < n:
                GitWrapper.deploy(repo_config)
        except Exception as e:
            logger.error('Error during \'pull\' or \'deploy\' operation '
                         'on path: %s' % repo_config['path'])
            logger.error(e)
        finally:
            # Release the lock if it's ours
            if running_lock.has_lock():
                running_lock.release()
            # Release the lock if it's ours
            if waiting_lock.has_lock():
                waiting_lock.release()
def delete(self, sr_uuid, vdi_uuid, data_only=False):
    """Delete this VDI.

    This operation IS idempotent and should succeed if the VDI
    exists and can be deleted or if the VDI does not exist. It is the
    responsibility of the higher-level management tool to ensure that the
    detach() operation has been explicitly called prior to deletion,
    otherwise the delete() will fail if the disk is still attached.
    """
    import blktap2
    from lock import Lock
    if data_only == False and self._get_blocktracking_status():
        logpath = self._get_cbt_logpath(vdi_uuid)
        parent_uuid = self._cbt_op(vdi_uuid, cbtutil.get_cbt_parent,
                                   logpath)
        parent_path = self._get_cbt_logpath(parent_uuid)
        child_uuid = self._cbt_op(vdi_uuid, cbtutil.get_cbt_child, logpath)
        child_path = self._get_cbt_logpath(child_uuid)

        lock = Lock("cbtlog", str(vdi_uuid))

        if self._cbt_log_exists(parent_path):
            self._cbt_op(parent_uuid, cbtutil.set_cbt_child, parent_path,
                         child_uuid)

        if self._cbt_log_exists(child_path):
            self._cbt_op(child_uuid, cbtutil.set_cbt_parent, child_path,
                         parent_uuid)
            lock.acquire()
            try:
                # Coalesce contents of bitmap with child's bitmap
                # Check if child bitmap is currently attached
                paused_for_coalesce = False
                consistent = self._cbt_op(child_uuid,
                                          cbtutil.get_cbt_consistency,
                                          child_path)
                if not consistent:
                    if not blktap2.VDI.tap_pause(self.session, sr_uuid,
                                                 child_uuid):
                        raise util.SMException("failed to pause VDI %s"
                                               % child_uuid)
                    paused_for_coalesce = True
                self._activate_cbt_log(self._get_cbt_logname(vdi_uuid))
                self._cbt_op(child_uuid, cbtutil.coalesce_bitmap, logpath,
                             child_path)
                lock.release()
            except util.CommandException:
                # If there is an exception in coalescing,
                # the CBT log file is not deleted and the pointers are
                # reset to what they were
                util.SMlog("Exception in coalescing bitmaps on VDI delete,"
                           " restoring to previous state")
                try:
                    if self._cbt_log_exists(parent_path):
                        self._cbt_op(parent_uuid, cbtutil.set_cbt_child,
                                     parent_path, vdi_uuid)
                    if self._cbt_log_exists(child_path):
                        self._cbt_op(child_uuid, cbtutil.set_cbt_parent,
                                     child_path, vdi_uuid)
                finally:
                    lock.release()
                lock.cleanup("cbtlog", str(vdi_uuid))
                return
            finally:
                # Unpause tapdisk if it wasn't originally paused
                if paused_for_coalesce:
                    blktap2.VDI.tap_unpause(self.session, sr_uuid,
                                            child_uuid)

        lock.acquire()
        try:
            self._delete_cbt_log()
        finally:
            lock.release()
        lock.cleanup("cbtlog", str(vdi_uuid))
def configure_blocktracking(self, sr_uuid, vdi_uuid, enable):
    """Function for configuring blocktracking"""
    import blktap2
    vdi_ref = self.sr.srcmd.params['vdi_ref']

    # Check if raw VDI or snapshot
    if self.vdi_type == vhdutil.VDI_TYPE_RAW or \
            self.session.xenapi.VDI.get_is_a_snapshot(vdi_ref):
        raise xs_errors.XenError(
            'VDIType', opterr='Raw VDI or snapshot not permitted')

    # Check if already enabled
    if self._get_blocktracking_status() == enable:
        return

    # Save disk state before pause
    disk_state = blktap2.VDI.tap_status(self.session, vdi_uuid)
    if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
        error = "Failed to pause VDI %s" % vdi_uuid
        raise xs_errors.XenError('CBTActivateFailed', opterr=error)

    logfile = None
    try:
        if enable:
            try:
                # Check available space
                self._ensure_cbt_space()
                logfile = self._create_cbt_log()
                # Set consistency
                if disk_state:
                    util.SMlog("Setting consistency of cbtlog file to "
                               "False for VDI: %s" % self.uuid)
                    logpath = self._get_cbt_logpath(self.uuid)
                    self._cbt_op(self.uuid, cbtutil.set_cbt_consistency,
                                 logpath, False)
            except Exception as error:
                self._delete_cbt_log()
                raise xs_errors.XenError('CBTActivateFailed',
                                         opterr=str(error))
        else:
            from lock import Lock
            lock = Lock("cbtlog", str(vdi_uuid))
            lock.acquire()
            try:
                # Find parent of leaf metadata file, if any,
                # and nullify its successor
                logpath = self._get_cbt_logpath(self.uuid)
                parent = self._cbt_op(self.uuid, cbtutil.get_cbt_parent,
                                      logpath)
                self._delete_cbt_log()
                parent_path = self._get_cbt_logpath(parent)
                if self._cbt_log_exists(parent_path):
                    self._cbt_op(parent, cbtutil.set_cbt_child,
                                 parent_path, uuid.UUID(int=0))
            except Exception as error:
                raise xs_errors.XenError('CBTDeactivateFailed',
                                         opterr=str(error))
            finally:
                lock.release()
                lock.cleanup("cbtlog", str(vdi_uuid))
    finally:
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid)